| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the number of integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter p <= max_perimeter with the maximum number of right-triangle solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
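# Quick sanity check (added illustration, not part of the original solution):
# the only integer right triangle with perimeter <= 12 is (3, 4, 5), so
#   pythagorean_triple(12) == Counter({12: 1})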
| 101
|
def solution(n: int = 1000) -> int:
    """
    Sum of the maximum remainders r_max = 2 * a * ((a - 1) // 2) for 3 <= a <= n
    (the closed form used for Project Euler problem 120).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 312
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowercase ( _snake_case : Dataset , _snake_case : Dict[str, str] ) ->List[str]:
"""simple docstring"""
__snake_case : str = args.log_outputs
__snake_case : Union[str, Any] = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
__snake_case : Dict = load_metric('''wer''' )
__snake_case : str = load_metric('''cer''' )
# compute metrics
__snake_case : List[Any] = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
__snake_case : Any = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
__snake_case : List[Any] = f"""WER: {wer_result}\nCER: {cer_result}"""
print(_snake_case )
with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(_snake_case )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case : Optional[Any] = f"""log_{dataset_id}_predictions.txt"""
__snake_case : Optional[int] = f"""log_{dataset_id}_targets.txt"""
with open(_snake_case , '''w''' ) as p, open(_snake_case , '''w''' ) as t:
# mapping function to write output
def write_to_file(_snake_case : Dict , _snake_case : List[str] ):
p.write(f"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(_snake_case , with_indices=_snake_case )
def lowercase ( _snake_case : str ) ->str:
"""simple docstring"""
__snake_case : Any = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case : Dict = re.sub(_snake_case , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case : Union[str, Any] = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
__snake_case : List[str] = ''' '''.join(text.split(_snake_case ) )
return text
def lowercase ( _snake_case : Dict ) ->Any:
"""simple docstring"""
__snake_case : Tuple = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_snake_case )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case : Dict = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case : Union[str, Any] = feature_extractor.sampling_rate
# resample audio
__snake_case : Union[str, Any] = dataset.cast_column('''audio''' , Audio(sampling_rate=_snake_case ) )
# load eval pipeline
if args.device is None:
__snake_case : str = 0 if torch.cuda.is_available() else -1
__snake_case : Tuple = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(_snake_case : Union[str, Any] ):
__snake_case : Any = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case : str = prediction['''text''']
__snake_case : Union[str, Any] = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
__snake_case : Optional[Any] = dataset.map(_snake_case , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(_snake_case , _snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
main(args)
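# Illustration (added, not part of the original script): normalize_text strips
# the ignored punctuation and lowercases, e.g.
#   normalize_text("Hello, World!")  ->  "hello world"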
| 102
|
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    r"""Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
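# Minimal usage sketch (the checkpoint name is an assumption, not part of this file):
#
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")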
| 312
| 0
|
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of `fname` to `version` using the regex for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to main docs with stable-doc links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
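# Illustration of the REPLACE_PATTERNS machinery (added sketch, not part of the
# release script): the "init" pattern rewrites the version string in place.
#
#   re_pattern, template = REPLACE_PATTERNS["init"]
#   code = '__version__ = "4.26.0.dev0"\n'
#   print(re_pattern.sub(template.replace("VERSION", "4.26.0"), code))
#   # __version__ = "4.26.0"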
| 103
|
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds a learned mean and standard deviation of image embeddings and scales/un-scales embeddings with them."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
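# Roundtrip sketch (illustrative only, assuming the class name as reconstructed
# above): scale() and unscale() are inverses for a fixed mean/std.
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(1, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-5)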
| 312
| 0
|
"""A keyboard-driven selection menu for the command line."""
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index, highlighting the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called; moves the cursor up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the index of the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
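# Usage sketch (interactive, so shown as a comment; class name as reconstructed above):
#
#   menu = BulletMenu("What would you like to do?", ["run", "quit"])
#   choice_index = menu.run(default_choice=0)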
| 104
|
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve the linear system A x = b iteratively via the Jacobi method, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check that each diagonal entry strictly dominates the sum of the other coefficients in its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
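# Example run (added illustration; the system below is strictly diagonally dominant):
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))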
| 312
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a : Tuple = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple =["""pixel_values"""]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 255 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> None:
super().__init__(**lowerCAmelCase__ )
a : Union[str, Any] = size if size is not None else {"shortest_edge": 224}
a : int = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
a : int = crop_size if crop_size is not None else {"height": 256, "width": 256}
a : str = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
a : Dict = do_resize
a : List[Any] = size
a : Any = resample
a : Dict = do_rescale
a : Optional[Any] = rescale_factor
a : List[Any] = do_center_crop
a : List[str] = crop_size
a : int = do_flip_channel_order
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PIL.Image.BILINEAR , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
a : int = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
a : Optional[int] = get_resize_output_image_size(lowerCAmelCase__ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
a : Union[str, Any] = get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[Any]:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> np.ndarray:
return flip_channel_order(lowerCAmelCase__ , data_format=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> PIL.Image.Image:
a : Dict = do_resize if do_resize is not None else self.do_resize
a : int = resample if resample is not None else self.resample
a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
a : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
a : str = size if size is not None else self.size
a : Optional[int] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
a : Tuple = crop_size if crop_size is not None else self.crop_size
a : Union[str, Any] = get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
a : Tuple = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
a : Optional[Any] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
a : Any = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
a : Optional[int] = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
a : Optional[Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
a : Union[str, Any] = [self.flip_channel_order(image=lowerCAmelCase__ ) for image in images]
a : int = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
a : Optional[int] = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple:
a : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCAmelCase__ ):
a : Dict = target_sizes.numpy()
a : int = []
for idx in range(len(lowerCAmelCase__ ) ):
a : str = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCAmelCase__ )
a : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase__ )
else:
a : List[str] = logits.argmax(dim=1 )
a : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
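# Minimal usage sketch (the checkpoint name is an assumption, not part of this file):
#
#   from transformers import MobileViTImageProcessor
#
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#   inputs = image_processor(images=image, return_tensors="pt")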
| 105
|
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    """Build a tiny dataset with two near-duplicate files and one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 312
| 0
|
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
lowerCAmelCase__ : Optional[Any] = len(A_ )
lowerCAmelCase__ : Tuple = max(A_ )
lowerCAmelCase__ : Optional[int] = min(A_ )
# create the counting array
lowerCAmelCase__ : Optional[int] = coll_max + 1 - coll_min
lowerCAmelCase__ : List[Any] = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
# sum each position with it's predecessors. now, counting_arr[i] tells
# us how many elements <= i has in the collection
for i in range(1 , A_ ):
lowerCAmelCase__ : Dict = counting_arr[i] + counting_arr[i - 1]
# create the output collection
lowerCAmelCase__ : Dict = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 , A_ ) ):
lowerCAmelCase__ : List[str] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def __SCREAMING_SNAKE_CASE ( A_ ):
return "".join([chr(A_ ) for i in counting_sort([ord(A_ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
__UpperCamelCase : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
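# Worked examples (added illustration): the coll_min offset makes negative
# inputs work, and the backward pass keeps the sort stable.
#   counting_sort([0, 5, 3, 2, 2])  ->  [0, 2, 2, 3, 5]
#   counting_sort([-2, -5, -45])    ->  [-45, -5, -2]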
| 106
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
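# Illustrative use of the aliases (added sketch, not part of the original module):
#
#   def first(items: ListLike[int]) -> int:
#       return items[0]
#
#   def read_text(path: PathLike) -> str:
#       with open(path, encoding="utf-8") as f:
#           return f.read()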
| 312
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on


class NllbTokenizer(PreTrainedTokenizer):
    """Construct an NLLB tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
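# Minimal usage sketch (the public checkpoint name is an assumption, not part of this file):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello, world!", return_tensors="pt")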
| 107
|
__version__ = "0.18.2"

from .configuration_utils import ConfigMixin
from .utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_inflect_available,
    is_invisible_watermark_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_librosa_available,
    is_note_seq_available,
    is_onnx_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
    is_transformers_available,
    is_transformers_version,
    is_unidecode_available,
    logging,
)


try:
    if not is_onnx_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipelines import OnnxRuntimeModel

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *  # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
    from .optimization import (
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
        get_scheduler,
    )
    from .pipelines import (
        AudioPipelineOutput,
        ConsistencyModelPipeline,
        DanceDiffusionPipeline,
        DDIMPipeline,
        DDPMPipeline,
        DiffusionPipeline,
        DiTPipeline,
        ImagePipelineOutput,
        KarrasVePipeline,
        LDMPipeline,
        LDMSuperResolutionPipeline,
        PNDMPipeline,
        RePaintPipeline,
        ScoreSdeVePipeline,
    )
    from .schedulers import (
        CMStochasticIterativeScheduler,
        DDIMInverseScheduler,
        DDIMParallelScheduler,
        DDIMScheduler,
        DDPMParallelScheduler,
        DDPMScheduler,
        DEISMultistepScheduler,
        DPMSolverMultistepInverseScheduler,
        DPMSolverMultistepScheduler,
        DPMSolverSinglestepScheduler,
        EulerAncestralDiscreteScheduler,
        EulerDiscreteScheduler,
        HeunDiscreteScheduler,
        IPNDMScheduler,
        KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
        PNDMScheduler,
        RePaintScheduler,
        SchedulerMixin,
        ScoreSdeVeScheduler,
        UnCLIPScheduler,
        UniPCMultistepScheduler,
        VQDiffusionScheduler,
    )
    from .training_utils import EMAModel

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .schedulers import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .schedulers import DPMSolverSDEScheduler

try:
    if not (is_torch_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )

try:
    if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionKDiffusionPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )

try:
    if not (is_torch_available() and is_librosa_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
else:
    from .pipelines import AudioDiffusionPipeline, Mel

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .pipelines import SpectrogramDiffusionPipeline

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_objects import *  # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
    from .schedulers import (
        FlaxDDIMScheduler,
        FlaxDDPMScheduler,
        FlaxDPMSolverMultistepScheduler,
        FlaxKarrasVeScheduler,
        FlaxLMSDiscreteScheduler,
        FlaxPNDMScheduler,
        FlaxSchedulerMixin,
        FlaxScoreSdeVeScheduler,
    )

try:
    if not (is_flax_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )

try:
    if not (is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_note_seq_objects import *  # noqa F403
else:
    from .pipelines import MidiProcessor
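# Usage sketch for the API re-exported above (the checkpoint name is an
# assumption, not part of this file):
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   image = pipe("an astronaut riding a horse").images[0]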
| 312
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a__ ( SCREAMING_SNAKE_CASE : Dataset , SCREAMING_SNAKE_CASE : Dict[str, str] ):
'''simple docstring'''
lowerCAmelCase : Dict = args.log_outputs
lowerCAmelCase : List[str] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
lowerCAmelCase : int = load_metric("wer" )
lowerCAmelCase : str = load_metric("cer" )
# compute metrics
lowerCAmelCase : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
lowerCAmelCase : str = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
lowerCAmelCase : List[str] = f"""WER: {wer_result}\nCER: {cer_result}"""
print(SCREAMING_SNAKE_CASE )
with open(f"""{dataset_id}_eval_results.txt""" , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowerCAmelCase : Optional[Any] = f"""log_{dataset_id}_predictions.txt"""
lowerCAmelCase : Tuple = f"""log_{dataset_id}_targets.txt"""
with open(SCREAMING_SNAKE_CASE , "w" ) as p, open(SCREAMING_SNAKE_CASE , "w" ) as t:
# mapping function to write output
def write_to_file(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
p.write(f"""{i}""" + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(f"""{i}""" + "\n" )
t.write(batch["target"] + "\n" )
result.map(SCREAMING_SNAKE_CASE , with_indices=SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowerCAmelCase : List[str] = re.sub(SCREAMING_SNAKE_CASE , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowerCAmelCase : Dict = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
lowerCAmelCase : List[str] = " ".join(text.split(SCREAMING_SNAKE_CASE ) )
return text
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Any = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=SCREAMING_SNAKE_CASE )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id )
lowerCAmelCase : Dict = feature_extractor.sampling_rate
# resample audio
lowerCAmelCase : Any = dataset.cast_column("audio" , Audio(sampling_rate=SCREAMING_SNAKE_CASE ) )
# load eval pipeline
if args.device is None:
lowerCAmelCase : int = 0 if torch.cuda.is_available() else -1
lowerCAmelCase : Union[str, Any] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(SCREAMING_SNAKE_CASE : Optional[int] ):
lowerCAmelCase : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowerCAmelCase : List[Any] = prediction["text"]
lowerCAmelCase : int = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
lowerCAmelCase : List[str] = dataset.map(SCREAMING_SNAKE_CASE , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
lowerCAmelCase__ = parser.parse_args()
main(args)
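# Example invocation sketch (script name, model id and dataset id below are placeholders,
# not verified against any particular checkpoint):
#   python eval.py --model_id <hub-model-id> --dataset mozilla-foundation/common_voice_8_0 \
#       --config en --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs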
def solution( n: int = 1000 ) -> int:
    """Return the sum of all multiples of 3 or 5 below ``n`` (Project Euler problem 1)."""
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
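# e.g. solution(10) == 23, since the multiples of 3 or 5 below 10 are 3, 5, 6 and 9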
if __name__ == "__main__":
print(F"{solution() = }")
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
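# Minimal usage sketch for the fast tokenizer above; "roberta-base" is one of the
# pretrained ids listed in the maps above, and the token ids are produced by the
# special-token wrapping implemented in build_inputs_with_special_tokens:
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]   # wrapped as <s> ... </s>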
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
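# The mixin above is meant to be combined with a concrete block class by setting
# `block_class` and `block_type` on the subclass, e.g. (hypothetical test-class name;
# DownBlock2D is a diffusers block):
#   class DownBlock2DTests(_a, unittest.TestCase):
#       block_class = DownBlock2D
#       block_type = "down"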
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Any =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
lowerCamelCase_ : Any =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
lowerCamelCase_ : Dict =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
lowerCamelCase_ : str =OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int ="A red cat sitting on a park bench"
lowerCamelCase_ : List[str] =np.random.RandomState(0 )
lowerCamelCase_ : str =pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=snake_case__ , output_type="np" , )
lowerCamelCase_ : Any =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels( objs=OBJECTS ,attrs=ATTRIBUTES ):
    """simple docstring"""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = OrderedDict()
with open(__UpperCamelCase ,"rb" ) as f:
A_ = pkl.load(__UpperCamelCase )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
A_ = ckp.pop(__UpperCamelCase )
if isinstance(__UpperCamelCase ,np.ndarray ):
A_ = torch.tensor(__UpperCamelCase )
else:
assert isinstance(__UpperCamelCase ,torch.tensor ), type(__UpperCamelCase )
A_ = v
return r
class _a :
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = {}
def __init__( self : str , UpperCAmelCase : dict , UpperCAmelCase : str = "root" , UpperCAmelCase : List[str]=0 ):
A_ = name
A_ = level
A_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
A_ = copy.deepcopy(UpperCAmelCase )
A_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
A_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
A_ = d
def __repr__( self : Optional[Any] ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any ):
A_ = val
A_ = val
A_ = key.split("." )
A_ = len(UpperCAmelCase ) - 1
A_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , ".".join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
A_ = val
else:
A_ = pointer[l]
def __A ( self : List[str] ):
return self._pointer
def __A ( self : int , UpperCAmelCase : Tuple , UpperCAmelCase : int ):
with open(f'''{file_name}''' , "w" ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
with open(f'''{file_name}''' , "w" ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def __A ( UpperCAmelCase : Optional[int] ):
with open(UpperCAmelCase ) as stream:
A_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self : str ):
A_ = " "
if self._name != "root":
A_ = f'''{t * (self._level-1)}{self._name}:\n'''
else:
A_ = ""
A_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n'''
A_ = level
return r[:-1]
@classmethod
def __A ( cls : Optional[Any] , UpperCAmelCase : str , **UpperCAmelCase : str ):
A_ , A_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def __A ( cls : int , UpperCAmelCase : str , **UpperCAmelCase : int ):
A_ = kwargs.pop("cache_dir" , UpperCAmelCase )
A_ = kwargs.pop("force_download" , UpperCAmelCase )
A_ = kwargs.pop("resume_download" , UpperCAmelCase )
A_ = kwargs.pop("proxies" , UpperCAmelCase )
A_ = kwargs.pop("local_files_only" , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
A_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
A_ = pretrained_model_name_or_path
else:
A_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
A_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
A_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
A_ = "Can't load config for"
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(UpperCAmelCase ), kwargs
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = torch.load("dump.pt" ,map_location=in_tensor.device )
A_ = in_tensor.numpy()
A_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ), (
f'''{sum([1 for x in np.isclose(__UpperCamelCase ,__UpperCamelCase ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    """simple docstring"""
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id ,filename ,use_cdn=True ):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
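# e.g. with use_cdn=False:
#   hf_bucket_url("bert-base-uncased", "config.json")
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json"  (legacy, dash-joined)
#   hf_bucket_url("user/model", "config.json")
#     -> "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/config.json"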
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : int=0 ,__UpperCamelCase : int=None ,):
"""simple docstring"""
A_ = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + "; ".join("{}/{}".format(__UpperCamelCase ,__UpperCamelCase ) for k, v in user_agent.items() )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
ua += "; " + user_agent
A_ = {"user-agent": ua}
if resume_size > 0:
A_ = "bytes=%d-" % (resume_size,)
A_ = requests.get(__UpperCamelCase ,stream=__UpperCamelCase ,proxies=__UpperCamelCase ,headers=__UpperCamelCase )
if response.status_code == 416: # Range not satisfiable
return
A_ = response.headers.get("Content-Length" )
A_ = resume_size + int(__UpperCamelCase ) if content_length is not None else None
A_ = tqdm(
unit="B" ,unit_scale=__UpperCamelCase ,total=__UpperCamelCase ,initial=__UpperCamelCase ,desc="Downloading" ,)
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(__UpperCamelCase ) )
temp_file.write(__UpperCamelCase )
progress.close()
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any=None ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Any=10 ,__UpperCamelCase : int=False ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=False ,):
"""simple docstring"""
if cache_dir is None:
A_ = TRANSFORMERS_CACHE
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = str(__UpperCamelCase )
os.makedirs(__UpperCamelCase ,exist_ok=__UpperCamelCase )
A_ = None
if not local_files_only:
try:
A_ = requests.head(__UpperCamelCase ,allow_redirects=__UpperCamelCase ,proxies=__UpperCamelCase ,timeout=__UpperCamelCase )
if response.status_code == 200:
A_ = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
A_ = url_to_filename(__UpperCamelCase ,__UpperCamelCase )
# get cache path to put the file
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(__UpperCamelCase ):
return cache_path
else:
A_ = [
file
for file in fnmatch.filter(os.listdir(__UpperCamelCase ) ,filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(__UpperCamelCase ) > 0:
return os.path.join(__UpperCamelCase ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(__UpperCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
A_ = cache_path + ".lock"
with FileLock(__UpperCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(__UpperCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
A_ = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(__UpperCamelCase ,"a+b" ) as f:
yield f
A_ = _resumable_file_manager
if os.path.exists(__UpperCamelCase ):
A_ = os.stat(__UpperCamelCase ).st_size
else:
A_ = 0
else:
A_ = partial(tempfile.NamedTemporaryFile ,dir=__UpperCamelCase ,delete=__UpperCamelCase )
A_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" ,__UpperCamelCase ,temp_file.name ,)
http_get(
__UpperCamelCase ,__UpperCamelCase ,proxies=__UpperCamelCase ,resume_size=__UpperCamelCase ,user_agent=__UpperCamelCase ,)
os.replace(temp_file.name ,__UpperCamelCase )
A_ = {"url": url, "etag": etag}
A_ = cache_path + ".json"
with open(__UpperCamelCase ,"w" ) as meta_file:
json.dump(__UpperCamelCase ,__UpperCamelCase )
return cache_path
def url_to_filename( url ,etag=None ):
    """simple docstring"""
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
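# e.g. url_to_filename(url, etag) yields "<sha256(url).hex>.<sha256(etag).hex>",
# plus a ".h5" suffix when the url itself ends in ".h5"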
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : List[str]=None ,__UpperCamelCase : Any=False ,__UpperCamelCase : Optional[int]=None ,__UpperCamelCase : Optional[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[Any]=False ,):
"""simple docstring"""
if cache_dir is None:
A_ = TRANSFORMERS_CACHE
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = str(__UpperCamelCase )
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = str(__UpperCamelCase )
if is_remote_url(__UpperCamelCase ):
# URL, so get it from the cache (downloading if necessary)
A_ = get_from_cache(
__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,user_agent=__UpperCamelCase ,local_files_only=__UpperCamelCase ,)
elif os.path.exists(__UpperCamelCase ):
# File, and it exists.
A_ = url_or_filename
elif urlparse(__UpperCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(__UpperCamelCase ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(__UpperCamelCase ) )
if extract_compressed_file:
if not is_zipfile(__UpperCamelCase ) and not tarfile.is_tarfile(__UpperCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
A_ , A_ = os.path.split(__UpperCamelCase )
A_ = output_file.replace("." ,"-" ) + "-extracted"
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
if os.path.isdir(__UpperCamelCase ) and os.listdir(__UpperCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
A_ = output_path + ".lock"
with FileLock(__UpperCamelCase ):
shutil.rmtree(__UpperCamelCase ,ignore_errors=__UpperCamelCase )
os.makedirs(__UpperCamelCase )
if is_zipfile(__UpperCamelCase ):
with ZipFile(__UpperCamelCase ,"r" ) as zip_file:
zip_file.extractall(__UpperCamelCase )
zip_file.close()
elif tarfile.is_tarfile(__UpperCamelCase ):
A_ = tarfile.open(__UpperCamelCase )
tar_file.extractall(__UpperCamelCase )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(__UpperCamelCase ) )
return output_path_extracted
return output_path
def get_data( query ,delim="," ):
    """simple docstring"""
    assert isinstance(query ,str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )
            except Exception:
                data = data.split("\n" )
        req.close()
    return data
def get_image_from_url( url ):
    """simple docstring"""
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__UpperCamelCase )
with open(__UpperCamelCase ,"rb" ) as stream:
A_ = pkl.load(__UpperCamelCase )
A_ = weights.pop("model" )
A_ = {}
for k, v in model.items():
A_ = torch.from_numpy(__UpperCamelCase )
if "running_var" in k:
A_ = torch.tensor([0] )
A_ = k.replace("running_var" ,"num_batches_tracked" )
A_ = zero
return new
def __snake_case ( ):
"""simple docstring"""
print(f'''{os.path.abspath(os.path.join(__UpperCamelCase ,os.pardir ) )}/demo.ipynb''' )
def img_tensorize( im ,input_format="RGB" ):
    """simple docstring"""
    assert isinstance(im ,str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f'''could not connect to: {im}'''
    img = cv2.cvtColor(img ,cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunks( images ,batch=1 ):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0 ,len(images ) ,batch ))
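# e.g. list(chunks([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]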
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.0006 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
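# Instantiation sketch (a minimal example; every other field keeps the defaults above):
#   config = TrajectoryTransformerConfig(n_layer=6, n_head=8)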
from __future__ import annotations
def __snake_case ( matrix: list[list[int]] ):
    """
    In-place DP: each cell accumulates the minimum path cost from the
    top-left corner, moving only right or down.
    >>> __snake_case([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1 ,len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 ,len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 ,len(matrix ) ):
        for j in range(1 ,len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp ):
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False
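# e.g. _is_chinese_char(ord("中")) -> True ; _is_chinese_char(ord("a")) -> False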
def is_chinese( word ):
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
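# e.g. with chinese_word_set = {"你好"}, tokens ["你", "好", "!"] become ["你", "##好", "!"]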
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(lines , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
main(args)
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__a :int = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset( Dataset ):
    """simple docstring"""
    def __init__( self , length: int = 101 ):
        self.length = length
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return i
class DummyDataCollator:
    """simple docstring"""
    def __call__( self , features ):
        return {"input_ids": torch.tensor(features ), "labels": torch.tensor(features )}
class DummyModel( nn.Module ):
    """simple docstring"""
    def __init__( self ):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )
    def forward( self , input_ids , labels=None ):
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class _a ( snake_case_ ):
"""simple docstring"""
@require_torch_neuroncore
def __A ( self : List[str] ):
A_ = f'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''--output_dir {output_dir}'''.split()
A_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class _a ( snake_case_ ):
"""simple docstring"""
@require_torch_multi_gpu
def __A ( self : List[str] ):
A_ = f'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''--output_dir {output_dir}'''.split()
A_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__a :Union[str, Any] = HfArgumentParser((TrainingArguments,))
__a :Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__a :int = DummyDataset(dataset_length)
def compute_metrics( p: EvalPrediction ):
    """simple docstring"""
    sequential = list(range(len(dataset ) ) )
    success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
    if not success and training_args.local_rank == 0:
        logger.warning(
            "Predictions and/or labels do not match expected results:\n  - predictions: "
            f'''{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}''' )
    return {"success": success}
__a :str = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__a :str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__a :str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__a :Optional[int] = 2
__a :List[Any] = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__a :str = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__a :Union[str, Any] = None
'''simple docstring'''
from __future__ import annotations
import math
UpperCAmelCase_ : Tuple = '2020.9.26'
UpperCAmelCase_ : Any = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d( x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
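# e.g. convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) ≈ (7.6923, 15.3846),
# i.e. (1*10/(3+10))*10 and (2*10/(3+10))*10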
def rotate( x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by the given angle."""
    if not isinstance(axis , str ):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            """Input values except axis must either be float or int: """
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
def get_config( model_name ):
    """simple docstring"""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name ):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
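# e.g. rename_key("stem.conv.weight") -> "bit.embedder.convolution.weight"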
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__a :str = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
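        # e.g. with the defaults above: image_size=30, patch_size=2 -> num_patches = 225,
        # and mask_ratio=0.6 -> seq_length = ceil(0.4 * 226) = 91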
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size], self.type_sequence_label_size )
__A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=_lowerCamelCase, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Union[str, Any], _lowerCamelCase : int ):
'''simple docstring'''
__A = TFViTMAEModel(config=_lowerCamelCase )
__A = model(_lowerCamelCase, training=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Dict, _lowerCamelCase : str, _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = TFViTMAEForPreTraining(_lowerCamelCase )
__A = model(_lowerCamelCase, training=_lowerCamelCase )
# expected sequence length = num_patches
__A = (self.image_size // self.patch_size) ** 2
__A = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__A = 1
__A = TFViTMAEForPreTraining(_lowerCamelCase )
__A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__A = model(_lowerCamelCase, training=_lowerCamelCase )
__A = self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels) )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = self.prepare_config_and_inputs()
((__A) , (__A) , (__A)) = config_and_inputs
__A = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFViTMAEModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
__A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase, tf.keras.layers.Layer ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(_lowerCamelCase )
__A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ['''pixel_values''']
self.assertListEqual(arg_names[:1], _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
# make the mask reproducible
np.random.seed(2 )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = int((config.image_size // config.patch_size) ** 2 )
__A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__A = model_class(_lowerCamelCase )
__A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase )
__A = model(_lowerCamelCase, noise=_lowerCamelCase )
__A = copy.deepcopy(self._prepare_for_class(_lowerCamelCase, _lowerCamelCase ) )
__A = model(**_lowerCamelCase, noise=_lowerCamelCase )
__A = outputs_dict[0].numpy()
__A = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ), 1e-6 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
# make the mask reproducible
np.random.seed(2 )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
__A = int((config.image_size // config.patch_size) ** 2 )
__A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCamelCase : str ):
__A = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCamelCase ):
__A = v.numpy()
else:
__A = np.array(_lowerCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
__A = model_class(_lowerCamelCase )
__A = self._prepare_for_class(_lowerCamelCase, _lowerCamelCase )
__A = prepare_numpy_arrays(_lowerCamelCase )
__A = model(_lowerCamelCase, noise=_lowerCamelCase )
__A = model(**_lowerCamelCase, noise=_lowerCamelCase )
self.assert_outputs_same(_lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : List[Any], _lowerCamelCase : Any, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
# make masks reproducible
np.random.seed(2 )
__A = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__A = tf.constant(_lowerCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__A = tf_noise
super().check_pt_tf_models(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
# make mask reproducible
np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.''' )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
        model = TFViTMAEModel.from_pretrained("facebook/vit-mae-base")
        self.assertIsNotNone(model)
def lowerCAmelCase ( ):
"""simple docstring"""
__A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
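

# Minimal sketch (assumes network access to the 'facebook/vit-mae-base' checkpoint,
# and is not part of the test suite): supplying an explicit `noise` array of shape
# (batch, num_patches), as the tests above do, makes ViTMAE's otherwise random patch
# masking fully reproducible.
def _demo_deterministic_vit_mae_mask():
    model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
    image_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    num_patches = int((model.config.image_size // model.config.patch_size) ** 2)
    noise = np.random.uniform(size=(1, num_patches))
    first = model(**inputs, noise=noise)
    second = model(**inputs, noise=noise)
    # same noise -> same mask -> identical logits
    np.testing.assert_allclose(first.logits.numpy(), second.logits.numpy(), atol=1e-6)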
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None):
    """simple docstring"""
    ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += f'''; jax/{_jax_version}'''
        ua += f'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += f'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
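

# Usage sketch of the builder above (the exact string depends on the local
# environment and installed backends): a dict argument is flattened into
# `key/value` fragments, a plain string is appended verbatim.
def _demo_http_user_agent():
    ua = http_user_agent({"pipeline_class": "StableDiffusionPipeline"})
    assert ua.startswith("diffusers/")
    assert ua.endswith("pipeline_class/StableDiffusionPipeline") or "telemetry/off" in ua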
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """simple docstring"""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'''{username}/{model_id}'''
    else:
        return f'''{organization}/{model_id}'''
def create_model_card(args, model_name):
    """simple docstring"""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers",
            tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """simple docstring"""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
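

# Minimal sketch (made-up cache path) of the snapshot parsing above: the commit
# hash is the path component after "snapshots/", returned only when it matches
# REGEX_COMMIT_HASH (a full 40-character hex string).
def _demo_commit_hash_extraction():
    resolved_file = "/cache/models--org--repo/snapshots/" + "a" * 40 + "/unet/config.json"
    assert extract_commit_hash(resolved_file) == "a" * 40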
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    """simple docstring"""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """simple docstring"""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
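

# Usage sketch of the helper above: the variant tag is spliced in just before the
# file extension, and a missing variant leaves the name untouched.
def _demo_add_variant():
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"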
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                    resume_download=resume_download, local_files_only=local_files_only,
                    use_auth_token=use_auth_token, user_agent=user_agent,
                    subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''',
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}\' so that the correct variant file can be added.''',
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only,
                use_auth_token=use_auth_token, user_agent=user_agent,
                subfolder=subfolder, revision=revision or commit_hash,
            )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
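

# Behavioural sketch (hypothetical class name, not one of the placeholders above):
# with DummyObject as the metaclass, merely touching the class funnels into
# requires_backends, which raises an ImportError naming the missing backends.
#
#   class OnnxStableDiffusionPipeline(metaclass=DummyObject):
#       _backends = ["torch", "transformers", "onnx"]
#
#   OnnxStableDiffusionPipeline()  # ImportError unless torch, transformers and onnx are installed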
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
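
# Note on the pattern above (general to this lazy-import scheme, not specific to
# MGP-STR): _LazyModule replaces the module object in sys.modules, so the
# torch-backed classes listed in _import_structure are only imported on first
# attribute access rather than at import time.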
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class A_ ( PretrainedConfig ):
    model_type = 'codegen'
    attribute_map = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(
        self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28,
        n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new",
        resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5,
        initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        tie_word_embeddings=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class A_ ( OnnxConfigWithPast ):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
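

# Rough illustration of the dummy past_key_values shape built above, using the
# default (hypothetical) config values n_head=16 and n_embd=4096: each of the
# n_layer (key, value) pairs has shape (batch, num_heads, seqlen + 2, head_dim).
def _demo_past_shape(batch: int = 2, seqlen: int = 5, n_head: int = 16, n_embd: int = 4096):
    past_key_values_length = seqlen + 2
    return (batch, n_head, past_key_values_length, n_embd // n_head)  # -> (2, 16, 7, 256)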
import functools
from typing import Any
def __snake_case ( string: str, words: list[str] ):
    """simple docstring"""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)
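

# Usage sketch for the word-break routine above: functools.cache memoises
# is_breakable per start index, so each index is expanded at most once.
def _demo_word_break():
    assert __snake_case("applepenapple", ["apple", "pen"]) is True
    assert __snake_case("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False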
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            image, size=(size_dict['height'], size_dict['width']), resample=resample, data_format=data_format, **kwargs )
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
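

# Sketch of the shortest-edge rule in `resize` above: a requested shortest edge of
# 224 is first inflated by 256/224 (the classic "resize to 256, center-crop to 224"
# margin), so the short side lands at ~256 before the 224x224 center crop.
def _demo_shortest_edge_scaling(shortest_edge: int = 224) -> int:
    return int((256 / 224) * shortest_edge)  # 224 -> 256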
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class _a ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
        sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
        tokenize_chinese_chars=True, strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
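

# Illustration of the pair layout produced by the two methods above:
# [CLS] A ... A [SEP] receives token_type_id 0, the second segment B ... B [SEP]
# receives 1.
def _demo_token_type_ids(len_a: int = 3, len_b: int = 2) -> list:
    return [0] * (1 + len_a + 1) + [1] * (len_b + 1)  # -> [0, 0, 0, 0, 0, 1, 1, 1]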
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
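
    # Example invocation (script name and paths are placeholders):
    #   python convert_lxmert_tf_checkpoint.py \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --config_file /path/to/lxmert_config.json \
    #       --pytorch_dump_path /path/to/pytorch_model.bin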
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
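

# Usage sketch of the registry above: aliases resolve to the canonical format
# type, so asking for "np" yields the formatter registered under "numpy".
def _demo_alias_lookup():
    assert get_format_type_from_alias("np") == "numpy"
    assert "numpy" in _FORMAT_TYPES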
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class snake_case_ ( unittest.TestCase ):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Optional[Any] = self.get_image_processor()
lowercase__ : List[str] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __UpperCamelCase ( self : str ) -> Dict:
lowercase__ : int = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : Optional[int] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowercase__ : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : str = image_processor(lowercase_ , return_tensors="np" )
lowercase__ : Tuple = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase__ : Union[str, Any] = "lower newer"
lowercase__ : Union[str, Any] = processor(text=lowercase_ )
lowercase__ : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Tuple = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase__ : int = "lower newer"
lowercase__ : int = self.prepare_image_inputs()
lowercase__ : int = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(lowercase_ ):
processor()
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : List[str] = processor.batch_decode(lowercase_ )
lowercase__ : Dict = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : str = self.get_tokenizer()
lowercase__ : str = VisionTextDualEncoderProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
lowercase__ : List[str] = "lower newer"
lowercase__ : Tuple = self.prepare_image_inputs()
lowercase__ : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
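

# Taken together, the tests above pin down the processor contract:
# VisionTextDualEncoderProcessor is a thin router, so processor(text=..., images=...)
# returns the union of the tokenizer outputs (input_ids, token_type_ids,
# attention_mask) and the image processor output (pixel_values), and decoding
# simply forwards to the tokenizer.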
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown(self) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4,
            out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37,
            layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5,
            pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear',
            clip_sample=False, set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np',
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np')
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='np',
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-lms-pipe', safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer,
            safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt], num_inference_steps=2, output_type='np').images

        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'padme amidala taking a bath artwork, safe for work, no nudity'
        seed = 2734971755
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 1044355234
        guidance_scale = 12

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50,
            output_type='np', width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7,
            sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
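# Usage sketch (added for illustration; the label names below are assumptions):
# `align_with_features` swaps the generic ClassLabel placeholder in the label
# schema for the concrete label feature found in a dataset, e.g.
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   aligned = AudioClassification().align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["cat", "dog"]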
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    'Timeout',
    'BaseFileLock',
    'WindowsFileLock',
    'UnixFileLock',
    'SoftFileLock',
    'FileLock',
]

__version__ = '3.0.12'

_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"""Lock {lock_id} released on {lock_filename}""")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('only soft file lock is available')
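# Usage sketch (added for illustration; the lock path is an illustrative
# assumption): FileLock resolves to the platform-appropriate class above and is
# normally used as a context manager around a critical section.
if __name__ == "__main__":
    with FileLock("demo.txt.lock", timeout=5):
        # The lock is held here; a second process acquiring the same path would
        # poll for up to 5 seconds and then raise Timeout.
        pass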
def base16_encode(data: bytes) -> str:
    # Turn the data into a list of integers (where each integer is a byte),
    # then turn each byte into its two-digit uppercase hexadecimal
    # representation, and join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
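# Round-trip sketch (added for illustration):
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")
    print(encoded)                 # 48656C6C6F20576F726C6421
    print(base16_decode(encoded))  # b'Hello World!'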
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined Harris constant, usually 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constant supplied at construction time
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange('AB', 'XYZ'), end=' ')
def solution(n: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for every a in [3, n]."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
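# Quick check (added for illustration): for n = 3 the sum has a single term,
# 2 * 3 * ((3 - 1) // 2) = 6.
if __name__ == "__main__":
    assert solution(3) == 6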
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""",
            f"""{long_class_name}LMPredictionHead""",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
        localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_localized_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"])
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"])
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
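# Usage sketch (added for illustration; the checkpoint and image path are
# illustrative assumptions, and from_pretrained requires network access):
#
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.open("cats.png")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # inputs now holds stacked input_ids / attention_mask plus pixel_values.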
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{\"default\": {\"dataset_size\": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, '''dataset_info.json'''))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=4_2 ),
'''v2''': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, '''README.md'''))
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, so image embeddings can be standardized before noising and
    un-standardized afterwards.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
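# Usage sketch (added for illustration): scale() standardizes CLIP image
# embeddings with the learned mean/std and unscale() inverts it, so for freshly
# initialized parameters (mean 0, std 1) both are the identity:
#
#   normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#   embeds = torch.randn(2, 768)
#   assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds)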
"""simple docstring"""
def solution(power: int = 1_0_0_0) -> int:
    """Return the sum of the digits of 2 ** power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 1_0, n // 1_0
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
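# Quick check (added for illustration): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
if __name__ == "__main__":
    assert solution(15) == 26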
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}''')

    if cols2 != 1:
        raise ValueError(f'''Constant matrix must be nx1 but received {rows2}x{cols2}''')

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(init_val)} and {rows1}'''
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
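# Usage sketch (added for illustration): a small strictly diagonally dominant
# system; 25 Jacobi sweeps converge towards the solution of Ax = b.
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=25))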
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1_0_2_4, num_of_sequences=1_0_2_4, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
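# Usage sketch (added for illustration; the script name, checkpoint and dataset
# below are illustrative assumptions):
#
#   accelerate launch validation_loss.py \
#       --model_ckpt codeparrot/codeparrot-small \
#       --dataset_name codeparrot/codeparrot-clean-valid \
#       --batch_size 2 --seq_length 1024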
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
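# Usage sketch (added for illustration; `_count_paths` is a hypothetical helper):
# the aliases describe parameters that accept one value, a list of values, or a
# dict of values, e.g. a single path, a list of paths, or named splits of paths.
def _count_paths(paths: NestedDataStructureLike[PathLike]) -> int:
    if isinstance(paths, dict):
        return sum(_count_paths(v) for v in paths.values())
    if isinstance(paths, (list, tuple)):
        return len(paths)
    return 1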
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # NOTE: the obfuscated source collapsed every field into a single repeated
    # identifier; the names below are restored from `datasets.DownloadConfig`
    # and should be treated as a best-effort reconstruction.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
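# Usage sketch (added for illustration, assuming the reconstructed field names above):
if __name__ == "__main__":
    config = DownloadConfig(max_retries=3)
    config_copy = config.copy()
    assert config_copy.max_retries == 3 and config_copy is not config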
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        TaFilmDecoder,
        TransformeraDModel,
        UNetaDModel,
        UNetaDConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
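# The same sum has a closed form via inclusion-exclusion: add the arithmetic
# series of multiples of 3 and of 5, then subtract multiples of 15, which were
# counted twice. This O(1) variant is an alternative sketch, not part of the
# original solution:
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution()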
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every ordered triple of distinct elements.
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then use a two-pointer scan for each anchor element.
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
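# Deterministic spot-check of both implementations: triplet_sum1 is O(n^3)
# over permutations while triplet_sum2 is the O(n^2) sort + two-pointer scan,
# so they must agree on any instance with a unique answer:
sample = [13, 29, 7, 23, 5]
assert triplet_sum1(sample[:], 35) == triplet_sum2(sample[:], 35) == (5, 7, 23)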
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
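# Example CLI invocation via python-fire (the language pair and output
# directory are illustrative only; requires network access and `datasets`):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir ./wmt16-ro-en
#
# which is equivalent to calling download_wmt_dataset("ro", "en", "wmt16", "./wmt16-ro-en").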
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
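# Sketch of the two URL shapes the helper produces (legacy flat ids vs
# namespaced ids); the model ids below are placeholders:
assert hf_bucket_url("bert-base-uncased", "pytorch_model.bin", use_cdn=False) == (
    "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin"
)
assert hf_bucket_url("org/model", "pytorch_model.bin", use_cdn=True) == (
    "https://cdn.huggingface.co/org/model/pytorch_model.bin"
)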
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading")
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
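# The cache filename is deterministic: sha256(url), optionally extended with
# sha256(etag); a quick illustration (hash values intentionally not hard-coded):
_demo = url_to_filename("https://example.com/weights.h5", etag='"abc"')
assert _demo.endswith(".h5") and "." in _demo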
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
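# `chunk` is a lazy batching generator; a quick demonstration:
batches = list(chunk(list(range(7)), batch=3))
assert batches == [[0, 1, 2], [3, 4, 5], [6]]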
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
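# Worked example: in the grid below the cheapest monotone (right/down) path is
# 1 -> 3 -> 1 -> 1 -> 1 with total cost 7. Note the function mutates its input.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(grid) == 7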
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
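# The key-renaming step alone can be exercised without a model; dotted numeric
# segments become underscored, matching Flax module naming:
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"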
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
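# Instantiation sketch; `multi_query=True` is the default above (one shared
# key/value head), which is what distinguishes this config from a vanilla
# GPT-2 style setup. The attribute_map lookup below relies on the standard
# PretrainedConfig aliasing behavior:
config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert config.hidden_size == 128  # resolved through attribute_map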
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
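# Spot-checks of the base-26 conversion: "A" -> 1, "Z" -> 26, "AB" -> 26 + 2 = 28.
assert excel_title_to_column("A") == 1
assert excel_title_to_column("Z") == 26
assert excel_title_to_column("AB") == 28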
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
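# The variant is spliced in before the final extension, e.g. for fp16 weights:
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin", None) == "diffusion_pytorch_model.bin"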
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase : str = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use GLPNImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_a = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
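
# A minimal usage sketch (an addition, not part of the original module):
# "applepenapple" splits as "apple pen apple", so the call returns True.
if __name__ == "__main__":
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False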
__version__ = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Return the largest 1-9 pandigital candidate found over the two search ranges."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
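
# A quick sanity check (an addition, not part of the original solution):
# 918273645 uses each digit 1-9 exactly once, so the predicate accepts it;
# 123456788 repeats the 8 and lacks a 9, so it is rejected.
assert is_9_pandigital(918273645)
assert not is_9_pandigital(123456788)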
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__a :Optional[Any] = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a formatter class for a given format type and its aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when a format type is requested but its backend is unavailable."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
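
# A minimal usage sketch (an addition, not part of the original module):
# aliases resolve to their canonical type before the formatter is built.
#
#     get_format_type_from_alias("np")   # -> "numpy"
#     get_formatter("numpy")             # -> a NumpyFormatter instance
#     get_formatter("unknown")           # -> raises ValueError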
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )

        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }

        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input

        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
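
# A minimal sketch of how a concrete test might use this mixin (an addition,
# not part of the original module; `DownBlock2D` is just an illustrative
# diffusers block):
#
#     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = DownBlock2D
#         block_type = "down"
#
#         def test_output(self):
#             expected_slice = [...]  # nine reference values for the output slice
#             super().test_output(expected_slice)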
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = 'audio'
    label_column: str = 'labels'

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: 'audio',
            self.label_column: 'labels',
        }
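
# A minimal usage sketch (an addition, not part of the original module):
# aligning the template against a dataset's features swaps in the real
# ClassLabel so the label names travel with the task.
#
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#     template = AudioClassification(audio_column="audio", label_column="labels")
#     template = template.align_with_features(features)
#     template.label_schema["labels"].names  # -> ["cat", "dog"]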
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl: list, wt: list, w: int, n: int) -> float:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio,
    splitting the last item that does not fully fit.
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
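
# A worked example (an addition, not part of the original module): with
# capacity 50 the greedy order is item 1 (ratio 6), item 2 (ratio 5), then
# 2/3 of item 3 (ratio 4): 60 + 100 + (20/30) * 120 = 240.0.
if __name__ == "__main__":
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0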
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    # Check that the data has an even number of hex digits
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
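
# A round-trip sketch (an addition, not part of the original module):
# b"Hello" encodes to "48656C6C6F" and decodes back unchanged.
if __name__ == "__main__":
    encoded = base16_encode(b"Hello")
    assert encoded == "48656C6C6F"
    assert base16_decode(encoded) == b"Hello"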
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }

        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners highlighted and the list of corners."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` starting from `starting_point` by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
def solution(n: int = 1000) -> int:
    """Sum 2 * a * ((a - 1) // 2) over a = 3..n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
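
# A small worked check (an addition, not part of the original solution):
# for n = 5 the terms are a=3 -> 2*3*1 = 6, a=4 -> 2*4*1 = 8,
# a=5 -> 2*5*2 = 20, so solution(5) should be 34.
assert solution(5) == 34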
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
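
# A minimal usage sketch (an addition, not part of the original module; the
# checkpoint name is illustrative):
#
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     # -> a BatchEncoding with input_ids, attention_mask and pixel_values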
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
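# A hypothetical usage sketch for the processor above; the checkpoint name is a
# real public OWL-ViT checkpoint, but the queries and blank image are placeholders.
#
#     from transformers import OwlViTProcessor
#     from PIL import Image
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.new("RGB", (768, 768))
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # `inputs` now holds `input_ids`, `attention_mask`, and `pixel_values`.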
| 312
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__A = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__A = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
__A = self.tokenizer_class.from_pretrained(_lowerCamelCase, **_lowerCamelCase )
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__A = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCamelCase, _lowerCamelCase )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__A = tempfile.mkdtemp()
__A = tokenizer_r.save_pretrained(_lowerCamelCase, legacy_format=_lowerCamelCase )
__A = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__A = tokenizer_r.from_pretrained(_lowerCamelCase )
__A = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase, _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_enro_tokenizer_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_00_01)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_00_04)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_00_20)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_00_26, 25_00_01])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 30_34, 2, 25_00_04]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_00_01,
            },
        )
| 266
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
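# A minimal sanity sketch, assuming the freshly initialized parameters above
# (mean 0, std 1): `scale` followed by `unscale` should round-trip an embedding.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)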
| 312
| 0
|
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
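# A quick hand-checked example: in this grid, the cheapest monotone right/down
# path is 1 -> 3 -> 1 -> 1 -> 1, for a total cost of 7.
if __name__ == "__main__":
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(example_grid) == 7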
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solves Ax = b iteratively for a strictly diagonally dominant matrix A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
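# An illustrative run with a strictly diagonally dominant system (the matrices
# and iteration count here are illustrative values, not project fixtures).
if __name__ == "__main__":
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    # Converges because every diagonal entry strictly dominates its row.
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0, 0.0], 50))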
| 312
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 322
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 312
| 0
|
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 119
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
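# A small usage sketch: `PathLike` covers `str`, `bytes`, and `os.PathLike`
# objects, so a helper (hypothetical, for illustration) can normalize any of
# them to `str`.
def _as_string(path: PathLike) -> str:
    fspath = os.fspath(path)
    return fspath.decode() if isinstance(fspath, bytes) else fspath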
| 312
| 0
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solves Ax = b iteratively for a strictly diagonally dominant matrix A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 26
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : List[Any] = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase)
datasets.utils.logging.set_verbosity(__UpperCamelCase)
transformers.utils.logging.set_verbosity(__UpperCamelCase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
logger.info(f'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
lowercase__ : Tuple = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : str = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome.")
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowercase__ : Tuple = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowercase__ : Dict = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : List[str] = train_dataset.features["label"].names
if training_args.do_eval:
lowercase__ : Any = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Tuple = eval_dataset.features["label"].names
if training_args.do_predict:
lowercase__ : List[str] = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : List[Any] = predict_dataset.features["label"].names
# Labels
lowercase__ : Optional[int] = len(__UpperCamelCase)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , idalabel={str(__UpperCamelCase): label for i, label in enumerate(__UpperCamelCase)} , labelaid={label: i for i, label in enumerate(__UpperCamelCase)} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : int = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ : Optional[Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowercase__ : Optional[int] = False
def preprocess_function(_lowerCamelCase : List[str]):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=__UpperCamelCase , max_length=data_args.max_seq_length , truncation=__UpperCamelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__ : List[Any] = min(len(__UpperCamelCase) , data_args.max_train_samples)
lowercase__ : Union[str, Any] = train_dataset.select(range(__UpperCamelCase))
with training_args.main_process_first(desc="train dataset map pre-processing"):
lowercase__ : Union[str, Any] = train_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(__UpperCamelCase)) , 3):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''')
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__ : Any = min(len(__UpperCamelCase) , data_args.max_eval_samples)
lowercase__ : Any = eval_dataset.select(range(__UpperCamelCase))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
lowercase__ : List[str] = eval_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowercase__ : Optional[int] = min(len(__UpperCamelCase) , data_args.max_predict_samples)
lowercase__ : List[str] = predict_dataset.select(range(__UpperCamelCase))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
lowercase__ : List[str] = predict_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowercase__ : List[Any] = evaluate.load("xnli")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCamelCase : EvalPrediction):
lowercase__ : Optional[Any] = p.predictions[0] if isinstance(p.predictions , __UpperCamelCase) else p.predictions
lowercase__ : Optional[int] = np.argmax(__UpperCamelCase , axis=1)
return metric.compute(predictions=__UpperCamelCase , references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ : Optional[Any] = default_data_collator
elif training_args.fpaa:
lowercase__ : Dict = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8)
else:
lowercase__ : Tuple = None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCamelCase , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
lowercase__ : List[str] = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : Optional[int] = last_checkpoint
lowercase__ : Union[str, Any] = trainer.train(resume_from_checkpoint=__UpperCamelCase)
lowercase__ : int = train_result.metrics
lowercase__ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCamelCase)
)
lowercase__ : List[Any] = min(__UpperCamelCase , len(__UpperCamelCase))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , __UpperCamelCase)
trainer.save_metrics("train" , __UpperCamelCase)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
lowercase__ : Union[str, Any] = trainer.evaluate(eval_dataset=__UpperCamelCase)
lowercase__ : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCamelCase)
lowercase__ : List[str] = min(__UpperCamelCase , len(__UpperCamelCase))
trainer.log_metrics("eval" , __UpperCamelCase)
trainer.save_metrics("eval" , __UpperCamelCase)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
lowercase__ , lowercase__ , lowercase__ : str = trainer.predict(__UpperCamelCase , metric_key_prefix="predict")
lowercase__ : Union[str, Any] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__UpperCamelCase)
)
lowercase__ : Dict = min(__UpperCamelCase , len(__UpperCamelCase))
trainer.log_metrics("predict" , __UpperCamelCase)
trainer.save_metrics("predict" , __UpperCamelCase)
lowercase__ : Tuple = np.argmax(__UpperCamelCase , axis=1)
lowercase__ : Dict = os.path.join(training_args.output_dir , "predictions.txt")
if trainer.is_world_process_zero():
with open(__UpperCamelCase , "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(__UpperCamelCase):
lowercase__ : Union[str, Any] = label_list[item]
writer.write(f'''{index}\t{item}\n''')
if __name__ == "__main__":
main()
| 87
|
def solution(n: int = 1000) -> int:
    """Returns the sum of all natural numbers below `n` that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
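# Equivalent closed form for cross-checking: by inclusion-exclusion, the answer
# is S(3) + S(5) - S(15), where S(k) sums the multiples of k below n.
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        terms = (n - 1) // k
        return k * terms * (terms + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)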
if __name__ == "__main__":
print(F"{solution() = }")
| 312
| 0
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
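# Example invocation (the script name, paths, and base checkpoint are placeholders):
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model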
| 339
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
| 312
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Union[str, Any] =ort.SessionOptions()
lowerCamelCase_ : Any =False
return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
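
# Minimal usage sketch (not part of the original tests): the scheduler-swap pattern
# exercised above, shown standalone. The checkpoint name and the decision to run on
# CPU are illustrative assumptions, not taken from this file.
def _scheduler_swap_example():
    from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline

    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
    )
    # from_config rebuilds a scheduler of a different class from the current scheduler's config
    pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe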
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
    )
default_cache_path = os.path.join(torch_cache_home, 'transformers')
CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
CONFIG = os.path.join(PATH, 'config.yaml')
ATTRIBUTES = os.path.join(PATH, 'attributes.txt')
OBJECTS = os.path.join(PATH, 'objects.txt')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckp(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
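
# Usage sketch (illustrative, not from the source): Config wraps a nested dict and
# exposes keys as attributes, recursively building Config objects for sub-dicts.
_example_cfg = Config({"model": {"hidden_size": 768}, "seed": 42})
assert _example_cfg.model.hidden_size == 768
assert _example_cfg.to_dict()["seed"] == 42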
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
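
# Worked example (illustrative): a flat model id uses the legacy "{id}-{file}" form,
# while a namespaced "user/model" id maps to "{id}/{file}".
assert hf_bucket_url("bert-base-uncased", "config.yaml") == (
    "https://cdn.huggingface.co/bert-base-uncased-config.yaml"
)
assert hf_bucket_url("user/model", "config.yaml", use_cdn=False) == (
    "https://s3.amazonaws.com/models.huggingface.co/bert/user/model/config.yaml"
)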
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name,
            )
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
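
# Illustrative check: cache names are sha256 hex digests of the URL (plus the etag
# digest when present), so distinct revisions of one URL get distinct cache entries.
_demo_name = url_to_filename("https://example.com/w.bin", etag='"abc"')
assert len(_demo_name.split(".")[0]) == 64  # sha256 hexdigest length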
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
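
# Usage sketch (illustrative, not from the source): cached_path returns an existing
# local path unchanged and transparently downloads remote URLs into TRANSFORMERS_CACHE.
def _cached_path_demo():
    local_copy = cached_path(CONFIG)  # an existing local file resolves to itself
    assert local_copy == CONFIG
    return local_copy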
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
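
# Illustrative check for chunk(): it yields successive slices of at most `batch` items.
assert list(chunk([1, 2, 3, 4, 5], batch=2)) == [[1, 2], [3, 4], [5]]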
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
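
# Worked check (illustrative): a 90 degree arc of a radius-10 circle is a quarter of
# the circumference, 2 * pi * 10 / 4 ~= 15.708, which arc_length(90, 10) reproduces.
assert abs(arc_length(90, 10) - (2 * pi * 10) / 4) < 1e-9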
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Minimum cost of a top-left to bottom-right path moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
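
# Illustrative check: with only right/down moves, the cheapest path through this grid
# is 1 + 3 + 1 + 1 + 1 = 7. Note that the function mutates its input in place.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7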
'''simple docstring'''
def bfs(graph, s, t, parent):
    # breadth-first search over the residual graph, recording the path via `parent`
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
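
# Sanity check (illustrative): this is the classic CLRS flow network, whose maximum
# flow from node 0 to node 5 is 23. Rebuild the adjacency matrix first, because
# ford_fulkerson mutates its input into the residual graph.
_fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(_fresh_graph, 0, 5) == 23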
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit n can be placed at (row, column) without a conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the first empty cell as (row, column), or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place via backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
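
# Example invocation (illustrative): the same conversion can be driven from Python.
# The output folder name below is an assumption; the call downloads the timm
# checkpoint, so it is left commented out here.
# convert_bit_checkpoint("resnetv2_50x1_bitm", "./resnetv2_50x1_bitm", push_to_hub=False)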
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
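
# Usage sketch (illustrative, not from the source): build a small directed graph and
# traverse it. Edges are stored per node as [weight, destination] pairs, which is why
# the dfs/bfs bodies above read node[1].
_g = DirectedGraph()
_g.add_pair(0, 1)
_g.add_pair(1, 2)
_g.add_pair(0, 2)
assert _g.dfs(0, 2)[-1] == 2   # dfs returns the visit order ending at the target
assert _g.bfs(0) == [0, 1, 2]  # bfs returns nodes in breadth-first visit order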
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
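
# Illustrative check: a variant is spliced in before the file extension, e.g. fp16
# weights for "diffusion_pytorch_model.bin" live in "diffusion_pytorch_model.fp16.bin".
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("model.safetensors") == "model.safetensors"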
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_0_2_4,
'moussaKam/barthez': 1_0_2_4,
'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        '''simple docstring'''
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
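# Illustrative usage of the class above (hypothetical snippet, not part of the
# original module):
#
#   tok = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
#   tok.build_inputs_with_special_tokens([5, 6], [7, 8])
#   # -> [cls, 5, 6, sep, sep, 7, 8, sep], the RoBERTa-style pair format with a
#   # double separator; create_token_type_ids_from_sequences returns all zeros.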
| 316
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
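# Aside: the lazy-import effect of _LazyModule can be sketched with a
# module-level __getattr__ (PEP 562). This is an illustrative alternative,
# not the actual _LazyModule implementation:
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module('.' + submodule, __name__), name)
#       raise AttributeError(name)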
| 312
| 0
|
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    """Generate multiple code completions for each task in the dataset."""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['stopping_criteria'][0].start_length = batch['ids'].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['task_id'].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    """simple docstring"""
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval' )
    code_eval_metric = load_metric('code_eval' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['test'] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''] , predictions=[['']] )
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`'
            ' flag to enable code evaluation.' )
        raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['test'][task]['test']
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append('\n' + test_func + '\n' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(f'Results: {pass_at_k}' )
        # Save results to json file
        with open(args.output_file , 'w' ) as fp:
            json.dump(pass_at_k , fp )


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 322
|
import functools
from typing import Any
def word_break( string: str , words: list[str] ) -> bool:
    """simple docstring"""
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )
    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
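# Illustrative calls (hypothetical inputs, not part of the original module):
# word_break('applepenapple', ['apple', 'pen']) -> True, since the string splits
# into 'apple' + 'pen' + 'apple'; word_break('catsandog', ['cats', 'dog', 'sand',
# 'and', 'cat']) -> False, because no full segmentation exists.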
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
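# Note on make_linear_from_emb: reusing `emb.weight.data` as the Linear weight
# ties the LM head to the shared embedding matrix, so the output logits are
# hidden_states @ embedding_matrix.T (standard weight tying, no new parameters).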
def convert_fairseq_m2m100_checkpoint_from_disk( checkpoint_path ):
    m2m_100 = torch.load(checkpoint_path , map_location='cpu' )
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 119
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
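    # Worked example for the method above: token_ids_a=[5, 6], token_ids_b=[7, 8]
    # gives len([cls, 5, 6, sep]) * [0] + len([7, 8, sep]) * [1]
    # = [0, 0, 0, 0, 1, 1, 1], the BERT-style segment ids that ELECTRA reuses.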
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 312
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=24 , _a=2 , _a=6 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=None , _a=1000 , ) -> List[Any]:
_A : int = parent
_A : Optional[int] = batch_size
_A : Dict = seq_length
_A : Any = is_training
_A : Union[str, Any] = use_input_mask
_A : List[Any] = use_token_type_ids
_A : List[str] = use_labels
_A : Dict = vocab_size
_A : Optional[Any] = hidden_size
_A : Optional[Any] = num_hidden_layers
_A : Any = num_attention_heads
_A : List[Any] = intermediate_size
_A : List[Any] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Union[str, Any] = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : List[Any] = type_sequence_label_size
_A : List[Any] = initializer_range
_A : Any = num_labels
_A : Dict = scope
_A : Optional[int] = range_bbox
def a__ ( self ) -> List[str]:
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_A : Dict = bbox[i, j, 3]
_A : Optional[Any] = bbox[i, j, 1]
_A : Optional[int] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_A : List[str] = bbox[i, j, 2]
_A : Dict = bbox[i, j, 0]
_A : Union[str, Any] = t
_A : Optional[int] = None
if self.use_input_mask:
_A : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A : Tuple = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[int] = None
_A : str = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A : Union[str, Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a__ ( self ) -> int:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Any:
_A : List[Any] = LiltModel(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a , bbox=_a , attention_mask=_a , token_type_ids=_a )
_A : Optional[Any] = model(_a , bbox=_a , token_type_ids=_a )
_A : List[str] = model(_a , bbox=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
_A : Dict = self.num_labels
_A : Tuple = LiltForTokenClassification(config=_a )
model.to(_a )
model.eval()
_A : Optional[Any] = model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
_A : Optional[int] = LiltForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_A : Dict = model(
_a , bbox=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self ) -> Union[str, Any]:
_A : List[str] = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) : List[str] = config_and_inputs
_A : Tuple = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_a = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = False
def a__ ( self , _a , _a , _a , _a , _a ) -> str:
return True
    def setUp( self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def a__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self ) -> Optional[Any]:
_A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_A : Tuple = type
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> int:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def a__ ( self ) -> str:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@slow
def a__ ( self ) -> List[str]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : str = LiltModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 26
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__a :Optional[Any] = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls: type , format_type: Optional[str] , aliases: Optional[List[str]] = None , ):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error: Exception , format_type: Optional[str] , aliases: Optional[List[str]] = None ):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias( format_type: Optional[str] ) -> Optional[str]:
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter( format_type: Optional[str] , **format_kwargs ) -> Formatter:
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
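# Illustrative use of the registry above (assuming the optional backends):
# get_formatter('np') resolves the alias to 'numpy' and returns a NumpyFormatter;
# get_formatter('pt') returns a TorchFormatter when PyTorch is installed, and
# otherwise raises the ValueError stored by _register_unavailable_formatter.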
| 312
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name: str , save_dir: str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 87
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 312
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def get_config( model_name ):
    '''simple docstring'''
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key( name ):
    '''simple docstring'''
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
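# Examples of the key mapping above (hypothetical checkpoint keys):
# rename_key('blocks.0.conv1.weight') -> 'bit.encoder.layers.0.conv1.weight'
# rename_key('head.fc.weight')        -> 'classifier.1.weight'
# rename_key('stem.conv.weight')      -> 'bit.embedder.convolution.weight'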
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('Logits:' , logits[0, :3] )
    print('Predicted class:' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model {model_name} and processor to the hub" )
        model.push_to_hub(F"ybelkada/{model_name}" )
        processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 339
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification( TaskTemplate ):
"""simple docstring"""
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 312
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = VQModel
    main_input_name = 'sample'

    @property
    def dummy_input( self , sizes=(32, 32) ):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}

    @property
    def input_shape( self ):
        return (3, 32, 32)

    @property
    def output_shape( self ):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature( self ):
        pass

    def test_training( self ):
        pass

    def test_from_pretrained_hub( self ):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained( self ):
        model = VQModel.from_pretrained("fusing/vqgan-dummy" )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
| 144
|
def base16_encode( data: bytes ) -> str:
    """simple docstring"""
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode( data: str ) -> bytes:
    """simple docstring"""
    if (len(data ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
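# Round-trip example: base16_encode(b'Hello') == '48656C6C6F' and
# base16_decode('48656C6C6F') == b'Hello', i.e. two uppercase hex digits per byte.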
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp( pattern: str , text: str ) -> bool:
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
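# Rolling-hash intuition for the update above: with h(s) = sum(ord(s[j]) * B**(m-1-j)) % M,
# sliding the window one position right removes the leading character's term
# (ord(text[i]) * modulus_power, where modulus_power == B**(m-1) % M), scales the
# remainder by B, and adds the incoming character, so each shift costs O(1).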
def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern , text )
    pattern = 'Lue'
    assert not rabin_karp(pattern , text )
    print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 280
|
import cv2
import numpy as np
class HarrisCorner:
    """simple docstring"""
    def __init__( self , k: float , window_size: int ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value" )
    def __str__( self ):
        return str(self.k )
    def detect( self , img_path: str ):
        img = cv2.imread(img_path , 0 )
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
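# The response computed above is the Harris measure r = det(M) - k * trace(M)**2
# for the windowed structure tensor M = [[wxx, wxy], [wxy, wyy]]: r is large and
# positive near corners, negative along edges, and close to zero in flat regions.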
if __name__ == "__main__":
__a :List[str] = HarrisCorner(0.04, 3)
__a , __a :str = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 312
| 0
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ):
    t = int(t )
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}'
def html_progress_bar( value , total , prefix , label , width=300 ):
return F'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def text_to_html_table( items ):
    html_code = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F'      <th>{i}</th>\n'
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = F'{elt:.6f}' if isinstance(elt , float ) else str(elt )
            html_code += F'      <td>{elt}</td>\n'
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
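# Example: text_to_html_table([['Step', 'Loss'], [10, 0.123456]]) renders a
# two-column HTML table with 'Step'/'Loss' headers; float cells are formatted
# to six decimal places and everything else via str().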
class NotebookProgressBar :
"""simple docstring"""
    warmup = 5
    update_every = 0.2
    def __init__( self , total: int , prefix: Optional[str] = None , leave: bool = True , parent: Optional["NotebookTrainingTracker"] = None , width: int = 3_00 , ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value: int , force_update: bool = False , comment: str = None ):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ):
        spaced_value = " " * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = f'[{spaced_value}/{self.total} : < :'
        elif self.predicted_remaining is None:
            self.label = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
        else:
            self.label = (
                f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
                f' {format_time(self.predicted_remaining )}'
            )
            self.label += f', {1/self.average_time_per_item:.2f} it/s'
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else f', {self.comment}]'
        self.display()
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("" ) )
class NotebookTrainingTracker(NotebookProgressBar ):
"""simple docstring"""
    def __init__( self , num_steps , column_names=None ):
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ):
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=3_00 ):
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback ):
"""simple docstring"""
    def __init__( self ):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss" )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ):
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else f'{state.epoch:.2f}'
        self.training_tracker.update(
            state.global_step + 1 , comment=f'Epoch {epoch}/{state.num_train_epochs}' , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch )
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss" ):
                    metric_key_prefix = re.sub(r"\_loss$" , "" , k )
            _ = metrics.pop("total_flos" , None )
            _ = metrics.pop("epoch" , None )
            _ = metrics.pop(f'{metric_key_prefix}_runtime' , None )
            _ = metrics.pop(f'{metric_key_prefix}_samples_per_second' , None )
            _ = metrics.pop(f'{metric_key_prefix}_steps_per_second' , None )
            _ = metrics.pop(f'{metric_key_prefix}_jit_compilation_time' , None )
            for k, v in metrics.items():
                if k == f'{metric_key_prefix}_loss':
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_" )
                    name = " ".join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ):
        self.training_tracker.update(
            state.global_step , comment=f'Epoch {int(state.epoch )}/{state.num_train_epochs}' , force_update=True )
        self.training_tracker = None
| 63
|
def solution( n: int = 1000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
print(solution())
| 312
| 0
|
'''simple docstring'''
import copy
import re
class TrialShortNamer :
    '''simple docstring'''
    PREFIX = 'hp'
    DEFAULTS = {}
    NAMING_INFO = None
@classmethod
    def use_namespace( cls , prefix , defaults ):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
@staticmethod
    def shortname_for_word( info , word ):
        if len(word ) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word ):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word ) + 1 ):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer ):
                s = """"""
                while integer != 0:
                    s = chr(ord("""A""" ) + integer % 10 ) + s
                    integer //= 10
                return s
            i = 0
            while True:
                sword = word + """#""" + int_to_alphabetic(i )
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info , param_name ):
        words = param_name.split("_" )
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word ) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts )
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name
    @staticmethod
    def add_new_param_name(info , param_name ):
        short_name = TrialShortNamer.shortname_for_key(info , param_name )
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name
    @classmethod
    def build_naming_info(cls ):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys() )
        for k in field_keys:
            cls.add_new_param_name(info , k )
        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls , params ):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX )]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}" )
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool ):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float) ) else "-"
            e = f"{key}{sep}{v}"
            name.append(e )
        return "_".join(name )
    @classmethod
    def parse_repr(cls , repr ):
        repr = repr[len(cls.PREFIX ) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_" )
        parameters = {}
        for value in values:
            if "-" in value:
                p_k , p_v = value.split("-" )
            else:
                p_k = re.sub("[0-9.]" , "" , value )
                p_v = float(re.sub("[^0-9.]" , "" , value ) )
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
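# Minimal usage sketch of TrialShortNamer (values illustrative): a subclass fixes PREFIX and
# DEFAULTS, `shortname` compresses a hyperparameter dict into a compact run name, and
# `parse_repr` inverts it.
class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}
name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})  # batch_size stays at its default, so only lr is encoded
params = RunNamer.parse_repr(name)  # round-trips to {"learning_rate": 0.0001, "batch_size": 32}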
| 200
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer']
_lowerCamelCase : Tuple = 'OwlViTImageProcessor'
_lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ):
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
A_ = kwargs.pop("feature_extractor" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )):
A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )]
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase ) != max_num_queries:
A_ = t + [" "] * (max_num_queries - len(UpperCAmelCase ))
A_ = self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
encodings.append(UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
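# Illustrative end-to-end use of the processor class above, following the standard OwlViT flow
# (the checkpoint id is the public one; the image path is a placeholder):
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
image = Image.open("cat.png")  # placeholder image
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
outputs = model(**inputs)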
| 312
| 0
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class snake_case :
'''simple docstring'''
def __init__( self : Dict, _lowerCamelCase : str = "cpu", _lowerCamelCase : str = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__A = device
__A = CLIPTokenizerFast.from_pretrained(_lowerCamelCase )
__A = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
__A = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
__A = torchvision.transforms.Normalize(self.image_mean, self.image_std )
__A = torchvision.transforms.Resize(2_24 )
__A = torchvision.transforms.CenterCrop(2_24 )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = self.resize(_lowerCamelCase )
__A = self.center_crop(_lowerCamelCase )
__A = self.normalize(_lowerCamelCase )
return images
def __call__( self : List[str], _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Dict=None, **_lowerCamelCase : Optional[int] ):
'''simple docstring'''
__A = self.tokenizer(text=_lowerCamelCase, **_lowerCamelCase )
__A = self.preprocess_img(_lowerCamelCase )
__A = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any], _lowerCamelCase : int=10, _lowerCamelCase : List[str]=0.01, _lowerCamelCase : str=None, _lowerCamelCase : List[str]=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : Tuple=None, _lowerCamelCase : Dict=None, _lowerCamelCase : Any=False, _lowerCamelCase : Optional[Any]=True, _lowerCamelCase : Tuple="image", _lowerCamelCase : Dict=True, _lowerCamelCase : Optional[int]=False, _lowerCamelCase : List[Any]=False, _lowerCamelCase : Union[str, Any]=False, ):
'''simple docstring'''
super().__init__()
__A = None
__A = device if device else get_device()
if vqgan:
__A = vqgan
else:
__A = load_vqgan(self.device, conf_path=_lowerCamelCase, ckpt_path=_lowerCamelCase )
self.vqgan.eval()
if clip:
__A = clip
else:
__A = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__A = ProcessorGradientFlow(device=self.device )
__A = iterations
__A = lr
__A = log
__A = make_grid
__A = return_val
__A = quantize
__A = self.vqgan.decoder.z_shape
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : List[Any]=None, _lowerCamelCase : int=None, _lowerCamelCase : List[Any]=5, _lowerCamelCase : Optional[Any]=True ):
'''simple docstring'''
__A = []
if output_path is None:
__A = '''./animation.gif'''
if input_path is None:
__A = self.save_path
__A = sorted(glob(input_path + '''/*''' ) )
if not len(_lowerCamelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(_lowerCamelCase ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__A = total_duration / len(_lowerCamelCase )
__A = [frame_duration] * len(_lowerCamelCase )
if extend_frames:
__A = 1.5
__A = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(_lowerCamelCase ) )
imageio.mimsave(_lowerCamelCase, _lowerCamelCase, duration=_lowerCamelCase )
print(f'gif saved to {output_path}' )
def _SCREAMING_SNAKE_CASE ( self : str, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__A = preprocess(Image.open(_lowerCamelCase ), target_image_size=2_56 ).to(self.device )
__A = preprocess_vqgan(_lowerCamelCase )
__A , *__A = self.vqgan.encode(_lowerCamelCase )
return z
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : str ):
'''simple docstring'''
__A = self.latent.detach().requires_grad_()
__A = base_latent + transform_vector
if self.quantize:
__A , *__A = self.vqgan.quantize(_lowerCamelCase )
else:
__A = trans_latent
return self.vqgan.decode(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict, _lowerCamelCase : Tuple, _lowerCamelCase : Tuple, _lowerCamelCase : Optional[int]=None ):
'''simple docstring'''
__A = self.clip_preprocessor(text=_lowerCamelCase, images=_lowerCamelCase, return_tensors='''pt''', padding=_lowerCamelCase )
__A = self.clip(**_lowerCamelCase )
__A = clip_outputs.logits_per_image
if weights is not None:
__A = similarity_logits * weights
return similarity_logits.sum()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], _lowerCamelCase : Dict, _lowerCamelCase : int, _lowerCamelCase : List[str] ):
'''simple docstring'''
__A = self._get_clip_similarity(pos_prompts['''prompts'''], _lowerCamelCase, weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__A = self._get_clip_similarity(neg_prompts['''prompts'''], _lowerCamelCase, weights=neg_prompts['''weights'''] )
else:
__A = torch.tensor([1], device=self.device )
__A = -torch.log(_lowerCamelCase ) + torch.log(_lowerCamelCase )
return loss
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Tuple, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Optional[int] ):
'''simple docstring'''
__A = torch.randn_like(self.latent, requires_grad=_lowerCamelCase, device=self.device )
__A = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__A = self._add_vector(_lowerCamelCase )
__A = loop_post_process(_lowerCamelCase )
__A = self._get_CLIP_loss(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
print('''CLIP loss''', _lowerCamelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=_lowerCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Optional[int], _lowerCamelCase : Any, _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
wandb.init(reinit=_lowerCamelCase, project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__A = Image.open(_lowerCamelCase )
__A = image.resize((2_56, 2_56) )
wandb.log('''Original Image''', wandb.Image(_lowerCamelCase ) )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : Any ):
'''simple docstring'''
if not prompts:
return []
__A = []
__A = []
if isinstance(_lowerCamelCase, _lowerCamelCase ):
__A = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(_lowerCamelCase, (tuple, list) ):
__A = prompt[0]
__A = float(prompt[1] )
elif ":" in prompt:
__A , __A = prompt.split(''':''' )
__A = float(_lowerCamelCase )
else:
__A = prompt
__A = 1.0
processed_prompts.append(_lowerCamelCase )
weights.append(_lowerCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_lowerCamelCase, device=self.device ),
}
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : str, _lowerCamelCase : Union[str, Any]=None, _lowerCamelCase : Optional[Any]=None, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : str=False, _lowerCamelCase : List[str]=True, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : str=None, ):
'''simple docstring'''
if image_path:
__A = self._get_latent(_lowerCamelCase )
else:
__A = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
__A = self.process_prompts(_lowerCamelCase )
__A = self.process_prompts(_lowerCamelCase )
if save_final and save_path is None:
__A = os.path.join('''./outputs/''', '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
else:
__A = save_path + '''_''' + get_timestamp()
os.makedirs(_lowerCamelCase )
__A = save_path
__A = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(_lowerCamelCase ) )
__A = loop_post_process(_lowerCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ):
if show_intermediate:
show_pil(_lowerCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'''Image''': wandb.Image(_lowerCamelCase )} )
if show_final:
show_pil(_lowerCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}_final.png' ) )
| 266
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__(self , embedding_dim: int = 768 , ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to(self , torch_device: Optional[Union[str, torch.device]] = None , torch_dtype: Optional[torch.dtype] = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale(self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale(self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
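# Sanity sketch of the scale/unscale pair above: with the registered mean and std they are exact
# inverses, so unscale(scale(x)) recovers x (the instantiation below is illustrative):
normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
x = torch.randn(2, 768)
assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)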
| 312
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
UpperCamelCase : Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def A ( snake_case :List[Any] , snake_case :Tuple , snake_case :Union[str, Any] , snake_case :Dict , snake_case :int ) -> str:
for attribute in key.split('.' ):
__UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
__UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
__UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A ( snake_case :Tuple , snake_case :Union[str, Any] ) -> int:
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__UpperCamelCase = None
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
elif name.split('.' )[0] == "proj":
__UpperCamelCase = fairseq_model.proj
__UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__UpperCamelCase )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , __UpperCamelCase )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
__UpperCamelCase = 'weight'
else:
__UpperCamelCase = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
def A ( snake_case :Any , snake_case :List[Any] , snake_case :Tuple , snake_case :Any , snake_case :Optional[Any] ) -> str:
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__UpperCamelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__UpperCamelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__UpperCamelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__UpperCamelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def A ( snake_case :Any ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
__UpperCamelCase = emb.weight.data
return lin_layer
def A ( snake_case :List[Any] ) -> Union[str, Any]:
with open(__UpperCamelCase , 'r' , encoding='utf-8' ) as f:
__UpperCamelCase = f.readlines()
__UpperCamelCase = [line.split(' ' )[0] for line in lines]
__UpperCamelCase = len(__UpperCamelCase )
__UpperCamelCase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__UpperCamelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def A ( snake_case :int , snake_case :Dict , snake_case :Optional[int] , snake_case :Optional[int] , snake_case :str , snake_case :List[str] , snake_case :Dict , ) -> str:
__UpperCamelCase = WavaVecaConfig.from_pretrained(__UpperCamelCase )
__UpperCamelCase = SpeechaTextaConfig.from_pretrained(
__UpperCamelCase , vocab_size=__UpperCamelCase , decoder_layers=__UpperCamelCase , do_stable_layer_norm=__UpperCamelCase )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
__UpperCamelCase = WavaVecaModel(__UpperCamelCase )
__UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , __UpperCamelCase )
__UpperCamelCase = SpeechaTextaForCausalLM(__UpperCamelCase )
__UpperCamelCase , __UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCamelCase )
# set output linear layer
unexpected_keys.remove('embed_out' )
__UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
__UpperCamelCase = SpeechEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase )
__UpperCamelCase = False
# add projection layer
__UpperCamelCase = nn.Parameter(projection_layer.weight )
__UpperCamelCase = nn.Parameter(projection_layer.bias )
__UpperCamelCase = create_vocab_dict(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , 'vocab.json' ) , 'w' ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase = SpeechaTextaTokenizer(os.path.join(__UpperCamelCase , 'vocab.json' ) )
tokenizer.save_pretrained(__UpperCamelCase )
__UpperCamelCase = hf_wavavec.config.to_dict()
__UpperCamelCase = tokenizer.pad_token_id
__UpperCamelCase = tokenizer.bos_token_id
__UpperCamelCase = tokenizer.eos_token_id
__UpperCamelCase = 'speech_to_text_2'
__UpperCamelCase = 'wav2vec2'
__UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
UpperCamelCase : str = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
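# Illustrative invocation of the conversion script above (the script filename and all paths are
# placeholders):
# python convert_wav2vec2_seq2seq_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./converted_model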
| 316
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] ,
    constant_matrix: NDArray[float64] ,
    init_val: list[float] ,
    iterations: int ,
) -> list[float]:
    """simple docstring"""
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'''Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'''
        raise ValueError(msg )
    if cols2 != 1:
        msg = f'''Constant matrix must be nx1 but received {rows2}x{cols2}'''
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f'''received {rows1}x{cols1} and {rows2}x{cols2}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f'''matrix but received {len(init_val )} and {rows1}'''
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64] ) -> bool:
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    import doctest
    doctest.testmod()
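# Worked example for jacobi_iteration_method above on a strictly diagonally dominant system
# (values chosen for illustration):
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))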
| 312
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
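# Illustrative call of the wrapper above in a multi-process accelerate script: only the local
# main process renders the bar, every other rank gets a disabled tqdm. Note the boolean comes
# first in this signature.
for _ in tqdm(True, range(100)):
    pass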
| 322
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class MakeDuplicateClustersTest(TestCase ):
    """simple docstring"""
    def test_make_duplicate_clusters(self ):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset(self ):
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_filter ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , False )
| 312
| 0
|
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
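# Tiny worked example (illustrative): for scores [3, 5, 2, 9] and height log2(4) = 2, the
# maximizer picks max(min(3, 5), min(2, 9)) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3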
| 119
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
__a :Any = TypeVar('T')
__a :Union[str, Any] = Union[List[T], Tuple[T, ...]]
__a :List[str] = Union[T, List[T], Dict[str, T]]
__a :Any = Union[str, bytes, os.PathLike]
| 312
| 0
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_snake_case = datasets.logging.get_logger(__name__)
_snake_case = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_snake_case = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_snake_case = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
_snake_case = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric ):
    def _info(self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self , dl_manager ):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            self.config_name = """bleurt-base-128"""
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def _compute(self , predictions , references ):
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 26
|
__a :Dict = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
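# Illustrative end-user sketch built on the public surface re-exported above (the model id is
# the standard public Stable Diffusion checkpoint; this is a usage example, not part of the
# module itself):
from diffusers import StableDiffusionPipeline
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe("an astronaut riding a horse").images[0]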
| 312
| 0
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset")}),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42)}),
SplitDict({"train": SplitInfo()}),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")])
def test_split_dict_asdict_has_dataset_name(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
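# For orientation (illustrative; the exact fields may vary by datasets version): the YAML list
# for a single populated split looks roughly like
# [{'name': 'train', 'num_bytes': 1337, 'num_examples': 42}], which is what the round trip
# above serializes and reloads.
sd = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
print(sd._to_yaml_list())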
| 87
|
def solution(n: int = 1000 ):
    """simple docstring"""
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
    print(F"{solution() = }")
| 312
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
inspect_dataset(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = path + '.py'
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : int ) -> Dict:
'''simple docstring'''
inspect_metric(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = path + '.py'
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = get_dataset_config_info(__UpperCamelCase , config_name=__UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
with pytest.raises(__UpperCamelCase ):
get_dataset_config_info(__UpperCamelCase , config_name=__UpperCamelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = get_dataset_config_names(__UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = get_dataset_infos(__UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCAmelCase = expected_configs[0]
assert expected_config in infos
_UpperCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = get_dataset_infos(__UpperCamelCase )
assert expected_config in infos
_UpperCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
with pytest.raises(__UpperCamelCase ):
get_dataset_split_names(__UpperCamelCase , config_name=__UpperCamelCase )
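# Quick illustrative use of the same inspection API outside pytest (network access required;
# `get_dataset_split_names` is already imported above):
print(get_dataset_split_names("squad", config_name="plain_text"))  # -> ['train', 'validation']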
| 339
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
| 312
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ) -> np.ndarray:
    return np.maximum(0 , vector )
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
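# The same call broadcasts over any array shape (illustrative):
print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # -> [[0.  3. ] [0.5 0. ]]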
| 144
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__a :int = True
except ImportError:
__a :Optional[Any] = False
try:
from torch.hub import _get_torch_home
__a :Optional[Any] = _get_torch_home()
except ImportError:
__a :Tuple = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
__a :Optional[Any] = os.path.join(torch_cache_home, 'transformers')
__a :int = 'https://cdn.huggingface.co'
__a :Any = 'https://s3.amazonaws.com/models.huggingface.co/bert'
__a :Optional[Any] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
__a :str = os.path.join(PATH, 'config.yaml')
__a :str = os.path.join(PATH, 'attributes.txt')
__a :Optional[Any] = os.path.join(PATH, 'objects.txt')
__a :Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
__a :Dict = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
__a :List[Any] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
__a :List[str] = 'pytorch_model.bin'
__a :Tuple = 'config.yaml'
def load_labels(objs=OBJECTS , attrs=ATTRIBUTES ):
    """simple docstring"""
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint(checkpoint_path ):
    """simple docstring"""
    r = OrderedDict()
    with open(checkpoint_path , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    """simple docstring"""

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if i == last_level:  # position check (the leaf of the dotted key)
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
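# Example (illustrative, not from the original source): a bare id such as
# "bert-base-uncased" has no "/" and takes the legacy "{endpoint}/{model_id}-{filename}"
# shape, while a namespaced id such as "google/electra-small" takes
# "{endpoint}/{model_id}/{filename}".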
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
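# Note (added for clarity): the cached filename is the sha256 hex digest of the URL,
# optionally extended with "." plus the sha256 hex digest of the ETag, so the same URL
# re-downloaded under a new ETag gets a distinct cache entry.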
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    """simple docstring"""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    """simple docstring"""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """simple docstring"""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    """simple docstring"""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    """simple docstring"""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    """simple docstring"""
    return (images[i : i + batch] for i in range(0, len(images), batch))
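# Example (illustrative): list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]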
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
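# Illustrative round trip (assuming learned mean/std parameters): unscale(scale(x))
# recovers x, since scale() whitens the embedding and unscale() undoes that affine map.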
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """simple docstring"""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
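
# Example (illustrative): in [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left -> bottom-right path (moving only right/down) is 1 -> 3 -> 1 -> 1 -> 1,
# so min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7.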
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    """simple docstring"""

    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    """simple docstring"""

    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    """simple docstring"""

    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    """simple docstring"""

    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f'''--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'''--output_dir {output_dir}'''.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    """simple docstring"""

    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f'''--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'''--output_dir {output_dir}'''.split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """simple docstring"""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
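
# Example (illustrative): get_edges({0: [1], 1: [0]}) returns {(0, 1), (1, 0)} --
# each undirected edge appears once per direction in the adjacency-list input.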
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    """simple docstring"""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
"""simple docstring"""
if "stem.conv" in name:
A_ = name.replace("stem.conv" ,"bit.embedder.convolution" )
if "blocks" in name:
A_ = name.replace("blocks" ,"layers" )
if "head.fc" in name:
A_ = name.replace("head.fc" ,"classifier.1" )
if name.startswith("norm" ):
A_ = "bit." + name
if "bit" not in name and "classifier" not in name:
A_ = "bit.encoder." + name
return name
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''')
        model.push_to_hub(f'''ybelkada/{model_name}''')
        processor.push_to_hub(f'''ybelkada/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8000,
'sample_size': 6_5536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8000,
'sample_size': 13_1072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6000,
'sample_size': 6_5536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """simple docstring"""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """simple docstring"""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
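
# Note (added for clarity): alpha_sigma_to_t maps an (alpha, sigma) noise pair to a
# timestep in [0, 1] via t = atan2(sigma, alpha) * 2 / pi, and get_crash_schedule
# re-parametrizes a linear t schedule through that mapping.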
class Object(object):
    '''simple docstring'''

    pass


class DiffusionUncond(nn.Module):
    '''simple docstring'''

    def __init__(self, global_args):
        '''simple docstring'''
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """simple docstring"""
    url = MODELS_MAP[model_name]['''url''']
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    """simple docstring"""
    if name.startswith('''skip'''):
        return name.replace('''skip''', RES_CONV_MAP['''skip'''])

    # name has to be of format main.{digit}
    if not name.startswith('''main.'''):
        raise ValueError(f'ResConvBlock error with {name}')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """simple docstring"""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}')
def rename(input_string, max_depth=13):
    """simple docstring"""
    string = input_string

    if string.split('''.''')[0] == "timestep_embed":
        return string.replace('''timestep_embed''', '''time_proj''')

    depth = 0
    if string.startswith('''net.3.'''):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.'''):
        string = string[4:]

    while string.startswith('''main.7.'''):
        depth += 1
        string = string[7:]

    if string.startswith('''main.'''):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else '''down_blocks.0'''

    if not string_left.startswith('''.'''):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.')

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    new_string_left = string_left
    if not isinstance(new_string_left, list):
        new_string = prefix + '''.''' + new_layer + '''.''' + new_string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in new_string_left]
    return new_string
def rename_orig_weights(state_dict):
    """simple docstring"""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel'''):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """simple docstring"""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """simple docstring"""
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')

    model_name = args.model_path.split('''/''')[-1].split('''.''')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['''state_dict'''])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('''kernel''') for k in list(diffusers_minus_renamed)), f'Problem with {diffusers_minus_renamed}'

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('''Diff sum''', diff_sum)
    print('''Diff max''', diff_max)
    assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'

    print(f'Conversion for {model_name} successful!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """simple docstring"""
    ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += f'''; jax/{_jax_version}'''
        ua += f'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += f'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """simple docstring"""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f'''{username}/{model_id}'''
    else:
        return f'''{organization}/{model_id}'''
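# Example (illustrative): get_full_repo_name("my-model", organization="my-org")
# returns "my-org/my-model"; without an organization the authenticated username is used.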
def create_model_card(args, model_name):
    """simple docstring"""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """simple docstring"""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """simple docstring"""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """simple docstring"""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
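
# Example (illustrative): _add_variant("diffusion_pytorch_model.bin", "fp16")
# returns "diffusion_pytorch_model.fp16.bin"; with variant=None the name is unchanged.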
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''')
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''', FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}\' so that the correct variant file can be added.''', FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`.")
        except RevisionNotFoundError:
            raise EnvironmentError(
                f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
                "this model name. Check the model page at "
                f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''')
        except EntryNotFoundError:
            raise EnvironmentError(
                f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''')
        except HTTPError as err:
            raise EnvironmentError(
                f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''')
        except ValueError:
            raise EnvironmentError(
                f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
                f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
                f''' directory containing a file named {weights_name} or'''
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.")
        except EnvironmentError:
            raise EnvironmentError(
                f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
                f'''containing a file named {weights_name}''')
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : str , UpperCAmelCase : Path , UpperCAmelCase : Union[str, None] = None , UpperCAmelCase : Union[List[str], None] = None , UpperCAmelCase : Union[str, List[str], None] = None , UpperCAmelCase : bool = True , ) -> int:
__lowerCAmelCase: str = [file for file in os.listdir(UpperCAmelCase ) if os.path.isfile(os.path.join(UpperCAmelCase , UpperCAmelCase ) )]
if identifier is not None:
__lowerCAmelCase: int = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
for n_ in n_identifier:
__lowerCAmelCase: Any = [file for file in files if n_ not in file]
else:
__lowerCAmelCase: Optional[int] = [file for file in files if n_identifier not in file]
__lowerCAmelCase: Tuple = ignore_files or []
ignore_files.append('__init__.py' )
__lowerCAmelCase: Optional[Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , UpperCAmelCase )
if only_modules:
__lowerCAmelCase: Any = file.split('.' )[0]
try:
__lowerCAmelCase: int = getattr(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: str = doctest.DocTestSuite(UpperCAmelCase )
__lowerCAmelCase: Dict = unittest.TextTestRunner().run(UpperCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
__lowerCAmelCase: Dict = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCAmelCase ( self : List[str] ) -> Any:
__lowerCAmelCase: Optional[Any] = Path('src/transformers' )
__lowerCAmelCase: Dict = 'modeling'
__lowerCAmelCase: str = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase , ignore_files=UpperCAmelCase )
def UpperCAmelCase ( self : Dict ) -> int:
__lowerCAmelCase: List[str] = Path('src/transformers' )
__lowerCAmelCase: Any = 'tokenization'
self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase )
def UpperCAmelCase ( self : Any ) -> str:
__lowerCAmelCase: Optional[int] = Path('src/transformers' )
__lowerCAmelCase: int = 'configuration'
self.analyze_directory(UpperCAmelCase , identifier=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase: int = Path('src/transformers' )
__lowerCAmelCase: str = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(UpperCAmelCase , n_identifier=UpperCAmelCase )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
__lowerCAmelCase: str = Path('docs/source' )
__lowerCAmelCase: Dict = ['favicon.ico']
self.analyze_directory(UpperCAmelCase , ignore_files=UpperCAmelCase , only_modules=UpperCAmelCase )
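# Standalone sketch of what analyze_directory does per module (assumption: the
# chosen module actually exposes doctests; the module name below is illustrative):
#   import doctest, unittest, transformers
#   suite = doctest.DocTestSuite(transformers.modeling_utils)
#   unittest.TextTestRunner().run(suite)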
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if the string can be segmented into a sequence of the given words."""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
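    # Quick sanity check (illustrative examples, the classic word-break cases):
    print(word_break("applepenapple", ["apple", "pen"]))  # True
    print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False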
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<eod>')
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
], )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ), ['▁he', 'll', 'o'] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
], )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCamelCase : str = {'input_ids': [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCamelCase

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='xlnet-base-cased', revision='c841166438c31ec7ca9a106dee7bb312b73ae511', )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
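# Usage sketch (assumes network access to the 'google/electra-small-discriminator'
# checkpoint; the token ids are illustrative):
#   tok = ElectraTokenizerFast.from_pretrained('google/electra-small-discriminator')
#   ids = tok.build_inputs_with_special_tokens([101, 102], [201, 202])
#   # -> [cls_id] + [101, 102] + [sep_id] + [201, 202] + [sep_id]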
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
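# Minimal end-to-end usage sketch for the pipeline under test (assumes a GPU and
# that the 'timbrooks/instruct-pix2pix' checkpoint can be downloaded):
#   pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
#   out = pipe(prompt="turn him into a cyborg", image=img, image_guidance_scale=1.0)
#   out.images[0].save("edited.png")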
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """simple docstring"""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """simple docstring"""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """simple docstring"""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'")
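# Usage sketch (illustrative): lookups go through the alias table first, so short
# names resolve to their canonical format type.
#   get_formatter("np")     # -> NumpyFormatter (via the "numpy" alias)
#   get_formatter("torch")  # -> TorchFormatter, or raises the registered error
#                           #    when PyTorch is not installed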
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
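# Illustrative example of the mapping above (derivable from the rules, not an
# exhaustive spec):
#   rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"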
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
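# Example invocation (assumes a locally downloaded GroupViT checkpoint; the
# checkpoint file name below is illustrative):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc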
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    '''Return the midpoint of two points.'''
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
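# Example run (requires a display for the turtle window):
#   python fractals.py 4
# draws a depth-4 Sierpinski-style triangle by recursively connecting edge midpoints.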
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    """simple docstring"""

    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = 'audio'
    label_column: str = 'labels'

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: 'audio',
            self.label_column: 'labels',
        }
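# Usage sketch (hedged; the label names are made up for illustration):
#   features = Features({'audio': Audio(), 'labels': ClassLabel(names=['cat', 'dog'])})
#   task = AudioClassification(audio_column='audio', label_column='labels')
#   task = task.align_with_features(features)  # label_schema now carries the real ClassLabel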
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
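# Usage sketch (illustrative): mix two latent tensors half-way along the great
# circle between them; nearly parallel inputs fall back to linear interpolation.
#   mixed_latents = slerp(0.5, latents_a, latents_b)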
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
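    # Note: cond_fn implements CLIP guidance: it decodes a one-step estimate of
    # x_0, embeds it with the CLIP image tower, and shifts the noise prediction
    # along the gradient that pulls the decoded image toward the target embedding.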
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
            style_prompt = self.get_image_description(style_image)
# get prompt text embeddings for content and style
lowerCamelCase_ : Any =self.tokenizer(
snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
lowerCamelCase_ : Union[str, Any] =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ : str =self.tokenizer(
snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
lowerCamelCase_ : List[Any] =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ : Optional[Any] =slerp(snake_case__ , snake_case__ , snake_case__ )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ : Optional[Any] =text_embeddings.repeat_interleave(snake_case__ , dim=0 )
# set timesteps
lowerCamelCase_ : Optional[int] ="offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ : Tuple ={}
if accepts_offset:
lowerCamelCase_ : List[Any] =1
self.scheduler.set_timesteps(snake_case__ , **snake_case__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =self.get_timesteps(snake_case__ , snake_case__ , self.device )
lowerCamelCase_ : Dict =timesteps[:1].repeat(snake_case__ )
# Preprocess image
lowerCamelCase_ : Union[str, Any] =preprocess(snake_case__ , snake_case__ , snake_case__ )
lowerCamelCase_ : Dict =self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ )
lowerCamelCase_ : Tuple =preprocess(snake_case__ , snake_case__ , snake_case__ )
lowerCamelCase_ : Dict =self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ )
lowerCamelCase_ : int =slerp(snake_case__ , snake_case__ , snake_case__ )
if clip_guidance_scale > 0:
lowerCamelCase_ : Optional[Any] =self.get_clip_image_embeddings(snake_case__ , snake_case__ )
lowerCamelCase_ : Tuple =self.get_clip_image_embeddings(snake_case__ , snake_case__ )
lowerCamelCase_ : List[str] =slerp(
snake_case__ , snake_case__ , snake_case__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ : Optional[Any] =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[Any] =content_text_input.input_ids.shape[-1]
lowerCamelCase_ : List[Any] =self.tokenizer([""] , padding="max_length" , max_length=snake_case__ , return_tensors="pt" )
lowerCamelCase_ : Tuple =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ : Optional[int] =uncond_embeddings.repeat_interleave(snake_case__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : List[str] =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ : str =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ : Union[str, Any] =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ : Dict =torch.randn(snake_case__ , generator=snake_case__ , device="cpu" , dtype=snake_case__ ).to(
self.device )
else:
lowerCamelCase_ : Optional[int] =torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowerCamelCase_ : Optional[int] =latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : str =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ : Dict ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Optional[Any] ={}
if accepts_eta:
lowerCamelCase_ : Any =eta
# check if the scheduler accepts generator
lowerCamelCase_ : Optional[int] ="generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ : str =generator
with self.progress_bar(total=snake_case__ ):
for i, t in enumerate(snake_case__ ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : str =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : Union[str, Any] =self.scheduler.scale_model_input(snake_case__ , snake_case__ )
# predict the noise residual
lowerCamelCase_ : List[Any] =self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ : Any =noise_pred.chunk(2 )
lowerCamelCase_ : Optional[int] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ : List[str] =(
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ : List[Any] =self.cond_fn(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : int =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not define self.vae.config.scaling_factor
lowerCamelCase_ : Any =1 / 0.18_215 * latents
lowerCamelCase_ : Optional[int] =self.vae.decode(snake_case__ ).sample
lowerCamelCase_ : Any =(image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ : int =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ : Optional[int] =self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
| 144
|
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # Turn each byte into two uppercase hexadecimal digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
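
# Hedged usage sketch (added for illustration; the function names follow the
# definitions above): a simple encode/decode round trip.
# >>> base16_encode(b"Hello World!")
# '48656C6C6F20576F726C6421'
# >>> base16_decode("48656C6C6F20576F726C6421")
# b'Hello World!'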
| 312
| 0
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase : str = {
# 1536-bit
5: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
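
# Hedged usage sketch (added for illustration, using the class above): two
# parties derive the same shared secret from each other's public keys.
# alice, bob = DiffieHellman(group=14), DiffieHellman(group=14)
# alice_shared = alice.generate_shared_key(bob.generate_public_key())
# bob_shared = bob.generate_shared_key(alice.generate_public_key())
# assert alice_shared == bob_shared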
| 280
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, expected in [0.04, 0.06]
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with corners marked and the list of corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Corner response threshold; tune this value for the input image.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
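
# Hedged usage sketch (added for illustration): run the detector on a synthetic
# checkerboard so no external image file is required.
# board = (np.indices((64, 64)).sum(axis=0) % 16 < 8).astype("uint8") * 255
# cv2.imwrite("board.png", board)
# color_img, corners = HarrisCorner(0.04, 3).detect("board.png")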
| 312
| 0
|
"""Matrix multiplication with Strassen's divide-and-conquer algorithm."""
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sided matrix into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive core of Strassen's algorithm: seven sub-multiplications."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
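
# Hedged sanity check (added for illustration): a non-square case small enough
# to verify by hand, since [1, 2] . [[5, 6], [7, 8]] = [19, 22].
# assert strassen([[1, 2]], [[5, 6], [7, 8]]) == [[19, 22]]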
| 63
|
def solution(n: int = 1000):
    """Sum 2 * a * ((a - 1) // 2) for a from 3 to n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 312
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
UpperCAmelCase_ : Optional[Any] = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 200
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """
    Constructs an OWL-ViT processor wrapping an image processor and a CLIP tokenizer.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
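
# Hedged usage sketch (added for illustration; the checkpoint name is an
# example and `image` is assumed to be a PIL image loaded elsewhere):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")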
| 312
| 0
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 266
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    so image embeddings can be normalized before noising and un-normalized afterwards.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
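
# Hedged usage sketch (added for illustration): with the default zero mean and
# unit std, scale followed by unscale is the identity up to float error.
# normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
# embeds = torch.randn(2, 768)
# assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)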
| 312
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase : Dict = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 316
|
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively for a strictly diagonally dominant A."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any row's diagonal entry does not dominate the off-diagonal sum."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
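
# Hedged usage sketch (added for illustration), with a strictly diagonally
# dominant system; three Jacobi sweeps give the values below.
# coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
# constant = np.array([[2], [-6], [-4]])
# jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3)
# -> [0.909375, -1.14375, -0.7484375]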
| 312
| 0
|
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the total surface area of a hemisphere (3 * pi * r^2)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus (4 * pi^2 * R * r)."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given the base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides with Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(f"Square: {area_square(1_0) = }")
print(f"Triangle: {area_triangle(1_0, 1_0) = }")
print(f"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(f"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(f"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(f"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(f"Circle: {area_circle(2_0) = }")
print(f"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"Cube: {surface_area_cube(2_0) = }")
print(f"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(f"Sphere: {surface_area_sphere(2_0) = }")
print(f"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(f"Cone: {surface_area_cone(1_0, 2_0) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(f"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(f"Torus: {surface_area_torus(2_0, 1_0) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(f"Square: {area_reg_polygon(4, 1_0) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
| 322
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 312
| 0
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test)

        EXPECTED_BERT_MAPPING = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
        EXPECTED_BLIP_MAPPING = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 119
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
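
# Hedged illustration (added; the alias names follow the definitions above):
# a loader annotated as accepting NestedDataStructureLike[PathLike] can take
# a single path, a list of paths, or a {split_name: path} mapping.
# def load(paths: NestedDataStructureLike[PathLike]) -> None: ...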
| 312
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 26
|
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
| 0
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 87
|
def solution(n: int = 1000):
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
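
# Quick sanity check (illustrative): below 10 the multiples of 3 or 5 are
# 3, 5, 6 and 9, so solution(10) should return 23.
# assert solution(10) == 23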
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
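
# Usage sketch (illustrative; assumes ImageGPTImageProcessor is constructible
# with its default kwargs): instantiating the shim should emit the
# FutureWarning defined above.
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     ImageGPTFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)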
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
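
# Illustrative sketch (not part of this file): concrete block tests are
# expected to subclass the mixin, set `block_class` and `block_type`, and call
# `test_output` with a hard-coded expected slice, e.g.:
#
# class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#     block_class = DownBlock2D  # hypothetical import from diffusers.models
#     block_type = "down"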
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    """Class to fetch an Instagram user's public metadata, given the username."""

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
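
    # Note (illustrative): get_json assumes Instagram still embeds the profile
    # as a JSON blob inside the 4th or 5th <script> tag; if the page layout
    # changes, that lookup is the piece to update.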
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """
    A self running doctest
    >>> test_instagram_user()
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG_NAME = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp):
    r = OrderedDict()
    with open(ckp, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
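
# The label files parsed by load_labels are expected to hold one category per
# line, optionally with comma-separated aliases; only the first alias is kept.
# Illustrative file contents:
#   traffic light,stoplight
#   fire hydrant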
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
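
# Usage sketch (illustrative): nested dicts become attribute-addressable
# Config objects, so configuration values can be read with dotted access.
#
# cfg = Config({"model": {"device": "cpu"}})
# assert cfg.model.device == "cpu"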
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
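
# Illustrative URL shapes produced by hf_bucket_url: legacy ids without a
# namespace become "<endpoint>/<model_id>-<filename>", while namespaced ids
# become "<endpoint>/<model_id>/<filename>".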
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
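
# Illustrative: the cached filename is sha256(url) plus ".sha256(etag)" when an
# ETag is known, so distinct revisions of the same URL never collide on disk.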
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
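
# Usage sketch (illustrative): `chunk` yields successive mini-batches lazily.
#
# for batch in chunk(list(range(10)), batch=4):
#     print(batch)  # [0, 1, 2, 3], then [4, 5, 6, 7], then [8, 9]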
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_sequence_label_size = 16
        self.type_vocab_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
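
    # Note (illustrative): the 3x3 expected slice pins the first three hidden
    # dimensions of the first three tokens; atol=1e-4 leaves room for minor
    # kernel-level numerical drift across TF versions.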
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the path from top left to bottom right of an array of numbers with the
    lowest possible sum, moving only down or right, and return that sum.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
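
# Note (illustrative): min_path_sum mutates `matrix` in place; pass a deep copy
# if the original grid must be preserved.
#
# import copy
# grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
# assert min_path_sum(copy.deepcopy(grid)) == 7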
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
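
    # Note (illustrative): with jit=True the pipeline runs under pmap, so the
    # leading image axis equals jax.device_count(); the reshape above folds the
    # device axis back into the batch axis before slicing.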
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__a :int = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
    dataset = DummyDataset(dataset_length)

    def compute_metrics(p: EvalPrediction) -> Dict:
        sequential = list(range(len(dataset)))
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
        if not success and training_args.local_rank == 0:
            logger.warning(
                "Predictions and/or labels do not match expected results:\n  - predictions: "
                f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
            )
        return {"success": success}

    trainer = Trainer(
        model=DummyModel(),
        args=training_args,
        data_collator=DummyDataCollator(),
        eval_dataset=dataset,
        compute_metrics=compute_metrics,
    )
    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)

    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)

    trainer.args.eval_accumulation_steps = 2

    metrics = trainer.evaluate()
    logger.info(metrics)
    if metrics["eval_success"] is not True:
        logger.error(metrics)
        exit(1)

    p = trainer.predict(dataset)
    logger.info(p.metrics)
    if p.metrics["test_success"] is not True:
        logger.error(p.metrics)
        exit(1)

    trainer.args.eval_accumulation_steps = None
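
# Note (illustrative): per the Trainer docs, eval_accumulation_steps controls
# how many prediction steps are accumulated on device before the outputs are
# moved to CPU; the second evaluate/predict pass above exercises that path,
# and resetting it to None restores the default for the next loop iteration.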