code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase_ = Mapping[str, np.ndarray]
lowercase_ = Mapping[str, Any] # Is a nested dict.
lowercase_ = 0.01
@dataclasses.dataclass(frozen=UpperCAmelCase )
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
_UpperCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
_UpperCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
_UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
_UpperCamelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
_UpperCamelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
_UpperCamelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
_UpperCamelCase : Optional[Sequence[int]] = None
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Protein:
lowercase__ = R'(\[[A-Z]+\]\n)'
lowercase__ = [tag.strip() for tag in re.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0]
lowercase__ = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
lowercase__ = ['N', 'CA', 'C']
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = 'X' # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(_SCREAMING_SNAKE_CASE , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(_SCREAMING_SNAKE_CASE , g[1][axis].split() ) ) )
lowercase__ = np.array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(_SCREAMING_SNAKE_CASE ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_SCREAMING_SNAKE_CASE , atom_mask=_SCREAMING_SNAKE_CASE , aatype=_SCREAMING_SNAKE_CASE , residue_index=np.arange(len(_SCREAMING_SNAKE_CASE ) ) , b_factors=_SCREAMING_SNAKE_CASE , )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> List[str]:
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if i == chain_id]
if parents is None or len(_SCREAMING_SNAKE_CASE ) == 0:
lowercase__ = ['N/A']
pdb_headers.append(F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}""" )
return pdb_headers
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = []
lowercase__ = pdb_str.split('\n' )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_SCREAMING_SNAKE_CASE ) , [] )
parent_dict[str(_SCREAMING_SNAKE_CASE )].append(_SCREAMING_SNAKE_CASE )
lowercase__ = max([int(_SCREAMING_SNAKE_CASE ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(_SCREAMING_SNAKE_CASE ) , ['N/A'] )
parents_per_chain.append(_SCREAMING_SNAKE_CASE )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [['N/A']]
def make_parent_line(_SCREAMING_SNAKE_CASE ) -> str:
return F"""PARENT {" ".join(_SCREAMING_SNAKE_CASE )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(_SCREAMING_SNAKE_CASE ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_SCREAMING_SNAKE_CASE )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_SCREAMING_SNAKE_CASE ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ['N/A']
out_pdb_lines.append(make_parent_line(_SCREAMING_SNAKE_CASE ) )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = residue_constants.restypes + ['X']
def res_atoa(_SCREAMING_SNAKE_CASE ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
lowercase__ = get_pdb_headers(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
pdb_lines.extend(_SCREAMING_SNAKE_CASE )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(_SCREAMING_SNAKE_CASE ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_SCREAMING_SNAKE_CASE , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = 'ATOM'
lowercase__ = atom_name if len(_SCREAMING_SNAKE_CASE ) == 4 else F""" {atom_name}"""
lowercase__ = ''
lowercase__ = ''
lowercase__ = 1.0_0
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ''
lowercase__ = 'A'
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = 'TER'
lowercase__ = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(_SCREAMING_SNAKE_CASE )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_SCREAMING_SNAKE_CASE , remark=_SCREAMING_SNAKE_CASE , parents=_SCREAMING_SNAKE_CASE , parents_chain_index=_SCREAMING_SNAKE_CASE , )
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
from __future__ import annotations
import time
import numpy as np
lowercase_ = [8, 5, 9, 7]
lowercase_ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase_ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , a : list[int] , a : list[list[int]] , a : list[list[int]] , )-> None:
"""simple docstring"""
lowercase__ = claim_vector
lowercase__ = allocated_resources_table
lowercase__ = maximum_claim_table
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def SCREAMING_SNAKE_CASE_ ( self : str )-> list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(a ): i for i in self.__need()}
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **a : List[str] )-> None:
"""simple docstring"""
lowercase__ = self.__need()
lowercase__ = self.__allocated_resources_table
lowercase__ = self.__available_resources()
lowercase__ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
lowercase__ = False
for each_need in need_list:
lowercase__ = True
for index, need in enumerate(a ):
if need > available_resources[index]:
lowercase__ = False
break
if execution:
lowercase__ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowercase__ = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(a )
# update available/freed resources stack
lowercase__ = np.array(a ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(a ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(a ) + 1}"""
+ ' '.join(f"""{it:>8}""" for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(a ) + 1}"""
+ ' '.join(f"""{it:>8}""" for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(a ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Input list must be a non empty list' )
if len(_SCREAMING_SNAKE_CASE ) == 1:
return True
lowercase__ = series[1] - series[0]
for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError('Input list must be a non empty list' )
lowercase__ = 0
for val in series:
answer += val
return answer / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
# Initialise PyTorch model
lowercase__ = FunnelConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(F"""Building PyTorch model from configuration: {config}""" )
lowercase__ = FunnelBaseModel(_SCREAMING_SNAKE_CASE ) if base_model else FunnelModel(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
lowercase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float:
lowercase__ = x_start
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
lowercase__ = 0.0
for _ in range(_SCREAMING_SNAKE_CASE ):
# Approximates curve as a sequence of linear lines and sums their length
lowercase__ = (x_end - x_start) / steps + xa
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowercase__ = xa
lowercase__ = fxa
return length
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowercase_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45
| 0
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase () -> Optional[int]:
lowercase__ = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
lowercase__ = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DownloadCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
ServeCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
UserCommands.register_subcommand(_SCREAMING_SNAKE_CASE )
AddNewModelCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
AddNewModelLikeCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
LfsCommands.register_subcommand(_SCREAMING_SNAKE_CASE )
PTtoTFCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Let's go
lowercase__ = parser.parse_args()
if not hasattr(_SCREAMING_SNAKE_CASE , 'func' ):
parser.print_help()
exit(1 )
# Run
lowercase__ = args.func(_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
from math import sqrt
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 10001 ) -> int:
lowercase__ = 0
lowercase__ = 1
while count != nth and number < 3:
number += 1
if is_prime(_SCREAMING_SNAKE_CASE ):
count += 1
while count != nth:
number += 2
if is_prime(_SCREAMING_SNAKE_CASE ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 709
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
from math import sqrt
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 1000000 ) -> int:
lowercase__ = 0
lowercase__ = 0
lowercase__ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_SCREAMING_SNAKE_CASE , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = 'vit_msn'
def __init__( self : Union[str, Any] , a : List[Any]=768 , a : Optional[Any]=12 , a : Optional[int]=12 , a : str=3_072 , a : Tuple="gelu" , a : Dict=0.0 , a : Dict=0.0 , a : Optional[Any]=0.02 , a : Any=1E-0_6 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Tuple=3 , a : List[Any]=True , **a : Any , )-> Tuple:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
| 711
|
from PIL import Image
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image:
def brightness(_SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 45
| 0
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowercase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
for attribute in key.split('.' ):
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
lowercase__ = hf_model.adapter
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
lowercase__ = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
lowercase__ = 'weight'
else:
lowercase__ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = full_name.split('conv_layers.' )[-1]
lowercase__ = name.split('.' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = full_name.split('adaptor.' )[-1]
lowercase__ = name.split('.' )
if items[1].isdigit():
lowercase__ = int(items[1] )
else:
lowercase__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
lowercase__ = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
lowercase__ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
lowercase__ = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
lowercase__ = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
lowercase__ = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
lowercase__ = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> List[str]:
lowercase__ = WavaVecaConfig.from_pretrained(
_SCREAMING_SNAKE_CASE , add_adapter=_SCREAMING_SNAKE_CASE , adapter_stride=_SCREAMING_SNAKE_CASE , adapter_kernel_size=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , output_hidden_size=_SCREAMING_SNAKE_CASE , )
lowercase__ = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# load model
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
lowercase__ = model[0].eval()
# load feature extractor
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
lowercase__ = WavaVecaModel(_SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE )
# load decoder weights
lowercase__ = MBartForCausalLM(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowercase__ = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
lowercase__ = False
lowercase__ = MBartaaTokenizer(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
lowercase__ = hf_wavavec.config.to_dict()
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer.eos_token_id
lowercase__ = 'mbart50'
lowercase__ = 'wav2vec2'
lowercase__ = tokenizer.eos_token_id
lowercase__ = 250004
lowercase__ = tokenizer.eos_token_id
lowercase__ = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250_004, type=int, help="""`decoder_start_token_id` of model config""")
lowercase_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713
|
import math
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(_SCREAMING_SNAKE_CASE )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
try:
lowercase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ = strtobool(_SCREAMING_SNAKE_CASE )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
lowercase_ = parse_flag_from_env("""RUN_SLOW""", default=False)
lowercase_ = parse_flag_from_env("""RUN_REMOTE""", default=False)
lowercase_ = parse_flag_from_env("""RUN_LOCAL""", default=True)
lowercase_ = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
lowercase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
lowercase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
lowercase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
lowercase_ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
lowercase_ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
lowercase_ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
lowercase_ = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
try:
import faiss # noqa
except ImportError:
lowercase__ = unittest.skip('test requires faiss' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
try:
import regex # noqa
except ImportError:
lowercase__ = unittest.skip('test requires regex' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
try:
import elasticsearch # noqa
except ImportError:
lowercase__ = unittest.skip('test requires elasticsearch' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ = unittest.skip('test requires sqlalchemy' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not config.TORCH_AVAILABLE:
lowercase__ = unittest.skip('test requires PyTorch' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
if not config.TF_AVAILABLE:
lowercase__ = unittest.skip('test requires TensorFlow' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not config.JAX_AVAILABLE:
lowercase__ = unittest.skip('test requires JAX' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
if not config.PIL_AVAILABLE:
lowercase__ = unittest.skip('test requires Pillow' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(_SCREAMING_SNAKE_CASE )
else:
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(_SCREAMING_SNAKE_CASE )
else:
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(_SCREAMING_SNAKE_CASE )
else:
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
def _require_spacy_model(_SCREAMING_SNAKE_CASE ):
try:
import spacy # noqa F401
spacy.load(_SCREAMING_SNAKE_CASE )
except ImportError:
return unittest.skip('test requires spacy' )(_SCREAMING_SNAKE_CASE )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(_SCREAMING_SNAKE_CASE ) )(_SCREAMING_SNAKE_CASE )
else:
return test_case
return _require_spacy_model
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(_SCREAMING_SNAKE_CASE )
else:
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(_SCREAMING_SNAKE_CASE )
else:
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ = unittest.skip('test is slow' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
if not _run_local_tests or _run_local_tests == 0:
lowercase__ = unittest.skip('test is local' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ = unittest.skip('test is packaged' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ = unittest.skip('test requires remote' )(_SCREAMING_SNAKE_CASE )
return test_case
def __UpperCamelCase (*_SCREAMING_SNAKE_CASE ) -> Tuple:
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_SCREAMING_SNAKE_CASE ) and name.startswith('test' ):
for decorator in decorators:
lowercase__ = decorator(_SCREAMING_SNAKE_CASE )
setattr(cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return cls
return decorate
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
pass
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = 1
_UpperCamelCase : Optional[Any] = 2
@contextmanager
def __UpperCamelCase (_SCREAMING_SNAKE_CASE=OfflineSimulationMode.CONNECTION_FAILS , _SCREAMING_SNAKE_CASE=1E-16 ) -> Tuple:
lowercase__ = requests.Session().request
def timeout_request(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Change the url to an invalid url so that the connection hangs
lowercase__ = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
lowercase__ = timeout
try:
return online_request(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
lowercase__ = url
lowercase__ = e.args[0]
lowercase__ = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
lowercase__ = (max_retry_error,)
raise
def raise_connection_error(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
raise requests.ConnectionError('Offline mode is enabled.' , request=_SCREAMING_SNAKE_CASE )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , _SCREAMING_SNAKE_CASE ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , _SCREAMING_SNAKE_CASE ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
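# A minimal usage sketch for the offline-simulation context manager above; the
# name `offline` and the `datasets.load_dataset` call are assumed for
# illustration, not taken verbatim from this file:
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-3):
#         with pytest.raises(requests.exceptions.ConnectTimeout):
#             datasets.load_dataset("some/remote-dataset")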
@contextmanager
def __UpperCamelCase (*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) as tmp_dir:
try:
os.chdir(_SCREAMING_SNAKE_CASE )
yield
finally:
os.chdir(_SCREAMING_SNAKE_CASE )
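# Hedged usage sketch, assuming the context manager above is exposed as
# `tmp_dir`: it chdirs into a fresh temporary directory and always restores
# the previous working directory on exit:
#
#     with tmp_dir():
#         open("scratch.txt", "w").write("only visible inside the tmp dir")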
@contextmanager
def __UpperCamelCase () -> str:
import gc
gc.collect()
lowercase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __UpperCamelCase () -> Tuple:
import gc
gc.collect()
lowercase__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
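# Hedged usage sketch for the two Arrow-memory context managers above (names
# assumed to be assert_arrow_memory_increases / assert_arrow_memory_doesnt_increase):
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(100_000))})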
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
return deepcopy(_SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist() == deepcopy(_SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
import decorator
from requests.exceptions import HTTPError
def _wrapper(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
try:
return func(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
except HTTPError as err:
if str(_SCREAMING_SNAKE_CASE ).startswith('500' ) or str(_SCREAMING_SNAKE_CASE ).startswith('502' ):
pytest.xfail(str(_SCREAMING_SNAKE_CASE ) )
raise err
return decorator.decorator(_wrapper , _SCREAMING_SNAKE_CASE )
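# Hedged example for the decorator above, which turns HTTP 500/502 responses
# into pytest xfails instead of hard failures; the decorator and test names
# here are illustrative assumptions:
#
#     @xfail_if_500_502
#     def test_fetch_remote_index():
#         fetch_remote_index()  # hypothetical helper that may raise HTTPError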
class SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , a : List[Any] , a : Any , a : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = returncode
lowercase__ = stdout
lowercase__ = stderr
async def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
while True:
lowercase__ = await stream.readline()
if line:
callback(_SCREAMING_SNAKE_CASE )
else:
break
async def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> _RunOutput:
if echo:
print('\nRunning: ' , ' '.join(_SCREAMING_SNAKE_CASE ) )
lowercase__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_SCREAMING_SNAKE_CASE , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ = []
lowercase__ = []
def tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" ):
lowercase__ = line.decode('utf-8' ).rstrip()
sink.append(_SCREAMING_SNAKE_CASE )
if not quiet:
print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , file=_SCREAMING_SNAKE_CASE )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ),
] , timeout=_SCREAMING_SNAKE_CASE , )
return _RunOutput(await p.wait() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=180 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> _RunOutput:
lowercase__ = asyncio.get_event_loop()
lowercase__ = loop.run_until_complete(
_stream_subprocess(_SCREAMING_SNAKE_CASE , env=_SCREAMING_SNAKE_CASE , stdin=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE , echo=_SCREAMING_SNAKE_CASE ) )
lowercase__ = ' '.join(_SCREAMING_SNAKE_CASE )
if result.returncode > 0:
lowercase__ = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def __UpperCamelCase () -> List[str]:
lowercase__ = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
lowercase__ = re.sub(R'^gw' , '' , _SCREAMING_SNAKE_CASE , 0 , re.M )
return int(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase () -> Optional[int]:
lowercase__ = 29500
lowercase__ = pytest_xdist_worker_id()
return port + uniq_delta
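# Worked example of the port arithmetic above: under pytest-xdist, worker
# "gw3" yields a worker id of 3 (the "gw" prefix is stripped), so the unique
# torch.distributed port becomes 29500 + 3 == 29503, keeping parallel test
# workers from colliding on one port.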
| 714
|
class SCREAMING_SNAKE_CASE : # Public class to implement a graph
def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = row
lowercase__ = col
lowercase__ = graph
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands.
"""simple docstring"""
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
                if not visited[i][j] and self.graph[i][j] == 1:
self.diffs(a , a , a )
count += 1
return count
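# Minimal usage sketch for the island counter above, assuming the readable
# names Graph(row, col, graph) and count_islands() for the obfuscated class
# and its final method (8-directional connectivity):
#
#     grid = [
#         [1, 1, 0, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1],
#     ]
#     Graph(3, 4, grid).count_islands()  # -> 2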
| 45
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> list[list[int]]:
lowercase__ = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
lowercase__ = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase__ = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(_SCREAMING_SNAKE_CASE ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(_SCREAMING_SNAKE_CASE ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase__ = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(_SCREAMING_SNAKE_CASE )
return next_generation
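# Worked example of the rules above: the three-cell "blinker" oscillates with
# period 2, so one application of new_generation flips it from vertical to
# horizontal (new_generation is the name this function is called by below):
#
#     new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
#     # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]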
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[Image.Image]:
lowercase__ = []
for _ in range(_SCREAMING_SNAKE_CASE ):
# Create output image
lowercase__ = Image.new('RGB' , (len(cells[0] ), len(_SCREAMING_SNAKE_CASE )) )
lowercase__ = img.load()
        # Save cells to image (y indexes rows, x indexes columns)
        for y in range(len(_SCREAMING_SNAKE_CASE ) ):
            for x in range(len(cells[0] ) ):
                lowercase__ = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
# Save image
images.append(_SCREAMING_SNAKE_CASE )
lowercase__ = new_generation(_SCREAMING_SNAKE_CASE )
return images
if __name__ == "__main__":
lowercase_ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 715
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ = ''
lowercase__ = 0
lowercase__ = 0
while div != 1:
lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if base >= 11 and 9 < mod < 36:
lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )]
else:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_SCREAMING_SNAKE_CASE )
return str(new_value[::-1] )
return new_value[::-1]
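# Worked examples of the conversion loop above, using the decimal_to_any name
# checked in __main__ below:
#
#     decimal_to_any(255, 16)  # -> 'FF'    (255 = 15 * 16 + 15; 15 maps to 'F')
#     decimal_to_any(9, 2)     # -> '1001'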
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 45
| 0
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = "" ) -> dict[str, float]:
lowercase__ = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowercase__ = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
lowercase__ = soup.find_all('td' , attrs='titleColumn' )
lowercase__ = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ) -> None:
    lowercase__ = get_imdb_top_250_movies()
with open(_SCREAMING_SNAKE_CASE , 'w' , newline='' ) as out_file:
lowercase__ = csv.writer(_SCREAMING_SNAKE_CASE )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
| 45
| 0
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase_ = TypeVar("""T""")
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
return (position - 1) // 2
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
return (2 * position) + 1
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
return (2 * position) + 2
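# Worked example of the implicit-array heap arithmetic above: for position 4,
# the parent sits at (4 - 1) // 2 == 1 and the children at 2 * 4 + 1 == 9 and
# 2 * 4 + 2 == 10, i.e. the standard 0-indexed binary-heap layout.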
class SCREAMING_SNAKE_CASE (Generic[T] ):
def __init__( self : Optional[Any] )-> None:
"""simple docstring"""
lowercase__ = []
lowercase__ = {}
lowercase__ = 0
def __len__( self : List[Any] )-> int:
"""simple docstring"""
return self.elements
def __repr__( self : str )-> str:
"""simple docstring"""
return str(self.heap )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> bool:
"""simple docstring"""
return self.elements == 0
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : T , a : int )-> None:
"""simple docstring"""
self.heap.append((elem, weight) )
lowercase__ = self.elements
self.elements += 1
self._bubble_up(a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowercase__ , lowercase__ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowercase__ , lowercase__ = self.heap[0]
self._bubble_down(a )
return elem
def SCREAMING_SNAKE_CASE_ ( self : Any , a : T , a : int )-> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
lowercase__ = (elem, weight)
if position > 0:
lowercase__ = get_parent_position(a )
lowercase__ , lowercase__ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(a )
else:
self._bubble_down(a )
else:
self._bubble_down(a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : T )-> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
if curr_pos == 0:
return None
lowercase__ = get_parent_position(a )
lowercase__ , lowercase__ = self.heap[curr_pos]
lowercase__ , lowercase__ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(a , a )
return self._bubble_up(a )
return None
def SCREAMING_SNAKE_CASE_ ( self : str , a : T )-> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
lowercase__ , lowercase__ = self.heap[curr_pos]
lowercase__ = get_child_left_position(a )
lowercase__ = get_child_right_position(a )
if child_left_position < self.elements and child_right_position < self.elements:
lowercase__ , lowercase__ = self.heap[child_left_position]
lowercase__ , lowercase__ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
if child_left_position < self.elements:
lowercase__ , lowercase__ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
else:
return None
if child_right_position < self.elements:
lowercase__ , lowercase__ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(a , a )
return self._bubble_down(a )
return None
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , nodea_pos : int , nodeb_pos : int )-> None:
        """Swap the heap entries at the two given positions and keep position_map in sync."""
        nodea_elem = self.heap[nodea_pos][0]
        nodeb_elem = self.heap[nodeb_pos][0]
        self.heap[nodea_pos] , self.heap[nodeb_pos] = (
            self.heap[nodeb_pos],
            self.heap[nodea_pos],
        )
        self.position_map[nodea_elem] = nodeb_pos
        self.position_map[nodeb_elem] = nodea_pos
class SCREAMING_SNAKE_CASE (Generic[T] ):
def __init__( self : Union[str, Any] )-> None:
"""simple docstring"""
lowercase__ = {}
lowercase__ = 0
def __repr__( self : Dict )-> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : Tuple )-> int:
"""simple docstring"""
return self.nodes
def SCREAMING_SNAKE_CASE_ ( self : Any , a : T )-> None:
"""simple docstring"""
if node not in self.connections:
lowercase__ = {}
self.nodes += 1
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : T , a : T , a : int )-> None:
"""simple docstring"""
self.add_node(a )
self.add_node(a )
lowercase__ = weight
lowercase__ = weight
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , ) -> tuple[dict[T, int], dict[T, T | None]]:
lowercase__ = {node: maxsize for node in graph.connections}
lowercase__ = {node: None for node in graph.connections}
lowercase__ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase__ = priority_queue.extract_min()
lowercase__ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
lowercase__ = node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase__ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
lowercase__ = node
return dist, parent
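# Hedged usage sketch for the Prim's implementation above; the readable names
# GraphUndirectedWeighted, add_edge and prims_algo are assumed for the
# obfuscated class, its last method and the module-level function:
#
#     g = GraphUndirectedWeighted()
#     g.add_edge("a", "b", 3)
#     g.add_edge("b", "c", 10)
#     g.add_edge("a", "c", 15)
#     dist, parent = prims_algo(g)
#     # the parent map encodes the MST, e.g. {"a": None, "b": "a", "c": "b"}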
| 717
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return arr
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowercase__ , lowercase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
        lowercase__ = (h - i + 1) // 3
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
        stooge(_SCREAMING_SNAKE_CASE , i + t , _SCREAMING_SNAKE_CASE )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
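# Worked example of the 2/3-overlap recursion above: on [2, 4, 5, 3, 1] the
# three overlapping two-thirds passes settle every element, yielding
# [1, 2, 3, 4, 5]; the overlap is what makes stooge sort correct, at the cost
# of roughly O(n**2.71) comparisons.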
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: returned only if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
lowercase__ = spearmanr(a , a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 45
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 719
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
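# The loop above is the counting scheme of Project Euler problem 114: a block of
# length >= 3 placed at block_start contributes the tilings of the cells left
# after the block plus a one-cell separator, and the trailing `+= 1` counts the
# block that runs flush to the end of the row.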
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
lowercase__ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_SCREAMING_SNAKE_CASE )] )
lowercase__ = np.array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _SCREAMING_SNAKE_CASE ) ) , x.transpose() ) , _SCREAMING_SNAKE_CASE )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
lowercase__ = (1, 2, 1)
lowercase__ = (1, 1, 0, 7)
lowercase__ = SARIMAX(
_SCREAMING_SNAKE_CASE , exog=_SCREAMING_SNAKE_CASE , order=_SCREAMING_SNAKE_CASE , seasonal_order=_SCREAMING_SNAKE_CASE )
lowercase__ = model.fit(disp=_SCREAMING_SNAKE_CASE , maxiter=600 , method='nm' )
lowercase__ = model_fit.predict(1 , len(_SCREAMING_SNAKE_CASE ) , exog=[test_match] )
return result[0]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
lowercase__ = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = regressor.predict(_SCREAMING_SNAKE_CASE )
return y_pred[0]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
train_user.sort()
    q_one = np.percentile(_SCREAMING_SNAKE_CASE , 25 )
    q_three = np.percentile(_SCREAMING_SNAKE_CASE , 75 )
    iqr = q_three - q_one
    low_lim = q_one - (iqr * 0.1)
    return low_lim
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
lowercase__ = 0
lowercase__ = 0
for i in list_vote:
if i > actual_result:
lowercase__ = not_safe + 1
else:
if abs(abs(_SCREAMING_SNAKE_CASE ) - abs(_SCREAMING_SNAKE_CASE ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase_ = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
lowercase_ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowercase_ = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase_ = normalize_df[:, 2].tolist()
lowercase_ = normalize_df[:, 0].tolist()
lowercase_ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase_ = normalize_df[:, [1, 2]].tolist()
lowercase_ = x[: len(x) - 1]
lowercase_ = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase_ = total_date[: len(total_date) - 1]
lowercase_ = total_user[: len(total_user) - 1]
lowercase_ = total_match[: len(total_match) - 1]
lowercase_ = total_date[len(total_date) - 1 :]
lowercase_ = total_user[len(total_user) - 1 :]
lowercase_ = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase_ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase_ = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
lowercase_ = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
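# Shape sketch for the permutes above (sizes are illustrative, not from this
# script): a TF conv kernel stored as (kh, kw, in_ch, out_ch), e.g. (3, 3, 32, 16),
# becomes the PyTorch layout (out_ch, in_ch, kh, kw) via .permute(3, 2, 0, 1);
# a depthwise kernel (kh, kw, ch, mult) maps to (ch, mult, kh, kw) via
# .permute(2, 3, 0, 1); plain dense kernels only need a transpose.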
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 45
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
lowercase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
if base_model:
lowercase__ = ''
else:
lowercase__ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[
: config.hidden_size, :
]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
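# Worked example of the fused-QKV split above, assuming hidden_size == 768 for
# illustration: timm stores qkv.weight with shape (3 * 768, 768), so rows
# [:768] form the query projection, rows [768:1536] the key, and rows [-768:]
# the value; the fused bias splits the same way along its only dimension.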
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = dct.pop(_SCREAMING_SNAKE_CASE )
lowercase__ = val
def __UpperCamelCase () -> str:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = ViTConfig()
lowercase__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowercase__ = True
lowercase__ = int(vit_name[-12:-10] )
lowercase__ = int(vit_name[-9:-6] )
else:
lowercase__ = 1000
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = int(vit_name[-6:-4] )
lowercase__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
lowercase__ = 192
lowercase__ = 768
lowercase__ = 12
lowercase__ = 3
elif vit_name[9:].startswith('small' ):
lowercase__ = 384
lowercase__ = 1536
lowercase__ = 12
lowercase__ = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
lowercase__ = 768
lowercase__ = 2304
lowercase__ = 8
lowercase__ = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
lowercase__ = 1024
lowercase__ = 4096
lowercase__ = 24
lowercase__ = 16
elif vit_name[4:].startswith('huge' ):
lowercase__ = 1280
lowercase__ = 5120
lowercase__ = 32
lowercase__ = 16
# load original model from timm
lowercase__ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ = timm_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
lowercase__ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowercase__ = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
else:
lowercase__ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowercase__ = DeiTImageProcessor(size=config.image_size )
else:
lowercase__ = ViTImageProcessor(size=config.image_size )
lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase__ = encoding['pixel_values']
lowercase__ = model(_SCREAMING_SNAKE_CASE )
if base_model:
lowercase__ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
lowercase__ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
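# Example invocation (hedged: the script filename and output path are
# placeholders, not taken from the original source):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224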
| 721
|
import argparse
import json
import subprocess
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = []
lowercase__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
lowercase__ = output.stdout.decode('utf-8' )
lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
lowercase__ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_SCREAMING_SNAKE_CASE )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
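# Minimal sketch of the filtering above, applied to an illustrative payload
# that carries only the fields this script actually reads ("name", "status").
# The helper name and sample runner names are hypothetical.
def _demo_filter_offline(target_runners):
    status = {
        "runners": [
            {"name": "ci-runner-1", "status": "offline"},
            {"name": "ci-runner-2", "status": "online"},
        ]
    }
    return [r for r in status["runners"] if r["name"] in target_runners and r["status"] == "offline"]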
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45
| 0
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Tuple = BertTokenizer
_UpperCamelCase : Tuple = BertTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : List[Any] = True
_UpperCamelCase : List[Any] = filter_non_english
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
super().setUp()
lowercase__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = 'UNwant\u00E9d,running'
lowercase__ = 'unwanted, running'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(a , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [9, 6, 7, 12, 10, 11] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'UNwant\u00E9d,running'
lowercase__ = tokenizer.tokenize(a )
lowercase__ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ = tokenizer.encode(a , add_special_tokens=a )
lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(a )
lowercase__ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
# With lower casing
lowercase__ = self.get_tokenizer(do_lower_case=a )
lowercase__ = self.get_rust_tokenizer(do_lower_case=a )
lowercase__ = 'UNwant\u00E9d,running'
lowercase__ = tokenizer.tokenize(a )
lowercase__ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ = tokenizer.encode(a , add_special_tokens=a )
lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(a )
lowercase__ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer()
lowercase__ = 'a\n\'ll !!to?\'d of, can\'t.'
lowercase__ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(a ) , a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
lowercase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowercase__ = {}
for i, token in enumerate(a ):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=a , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowercase__ = tokenizer.encode('sequence builders' , add_special_tokens=a )
lowercase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
lowercase__ = tokenizer.build_inputs_with_special_tokens(a )
lowercase__ = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
lowercase__ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowercase__ = tokenizer_r.encode_plus(
a , return_attention_mask=a , return_token_type_ids=a , return_offsets_mapping=a , add_special_tokens=a , )
lowercase__ = tokenizer_r.do_lower_case if hasattr(a , 'do_lower_case' ) else False
lowercase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
lowercase__ = ['的', '人', '有']
lowercase__ = ''.join(a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ = True
lowercase__ = self.tokenizer_class.from_pretrained(a , **a )
lowercase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
lowercase__ = tokenizer_p.encode(a , add_special_tokens=a )
lowercase__ = tokenizer_r.encode(a , add_special_tokens=a )
lowercase__ = tokenizer_r.convert_ids_to_tokens(a )
lowercase__ = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a , a )
self.assertListEqual(a , a )
lowercase__ = False
lowercase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
lowercase__ = self.tokenizer_class.from_pretrained(a , **a )
lowercase__ = tokenizer_r.encode(a , add_special_tokens=a )
lowercase__ = tokenizer_p.encode(a , add_special_tokens=a )
lowercase__ = tokenizer_r.convert_ids_to_tokens(a )
lowercase__ = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that only the first Chinese character is not preceded by "##".
lowercase__ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(a )
]
self.assertListEqual(a , a )
self.assertListEqual(a , a )
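# Illustrative sketch (a simplified stand-in, not the library's
# WordpieceTokenizer): greedy longest-match-first subword splitting, which is
# the behaviour the wordpiece test above asserts. Continuation pieces carry a
# "##" prefix, and a word with no match collapses to the unknown token.
def _demo_wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            sub = ("##" if start > 0 else "") + word[start:end]
            if sub in vocab:
                piece = sub
                break
            end -= 1
        if piece is None:
            return [unk]  # no prefix of the remainder is in the vocab
        pieces.append(piece)
        start = end
    return pieces
# e.g. _demo_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]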
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
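# Illustrative sketch with plain dicts (not the real processor classes): the
# dual-modality merge done by __call__ above keeps the text encoding and, when
# audio is also present, attaches its input_features. Helper name hypothetical.
def _demo_merge_modalities(text_encoding, audio_features=None):
    merged = dict(text_encoding)
    if audio_features is not None:
        merged["input_features"] = audio_features["input_features"]
    return merged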
| 45
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'efficientformer'
def __init__( self : Any , a : List[int] = [3, 2, 6, 4] , a : List[int] = [48, 96, 224, 448] , a : List[bool] = [True, True, True, True] , a : int = 448 , a : int = 32 , a : int = 4 , a : int = 7 , a : int = 5 , a : int = 8 , a : int = 4 , a : float = 0.0 , a : int = 16 , a : int = 3 , a : int = 3 , a : int = 3 , a : int = 2 , a : int = 1 , a : float = 0.0 , a : int = 1 , a : bool = True , a : bool = True , a : float = 1E-5 , a : str = "gelu" , a : float = 0.02 , a : float = 1E-1_2 , a : int = 224 , a : float = 1E-0_5 , **a : Tuple , )-> None:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = hidden_sizes
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = depths
lowercase__ = mlp_expansion_ratio
lowercase__ = downsamples
lowercase__ = dim
lowercase__ = key_dim
lowercase__ = attention_ratio
lowercase__ = resolution
lowercase__ = pool_size
lowercase__ = downsample_patch_size
lowercase__ = downsample_stride
lowercase__ = downsample_pad
lowercase__ = drop_path_rate
lowercase__ = num_metaad_blocks
lowercase__ = distillation
lowercase__ = use_layer_scale
lowercase__ = layer_scale_init_value
lowercase__ = image_size
lowercase__ = batch_norm_eps
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
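# Illustrative note mirroring the two methods above: a single sequence is laid
# out as <s> A </s>, a pair as <s> A </s></s> B </s>, and token type ids are
# all zeros. The helper below uses hypothetical ids for <s>/</s>.
def _demo_pair_layout(ids_a, ids_b, cls_id=0, sep_id=2):
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]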
| 45
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowercase__ = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
lowercase__ = []
lowercase__ = []
lowercase__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase__ = full_name.split('/' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase__ = name[1:]
# figure out how many levels deep the name is
lowercase__ = 0
for _name in name:
if _name.startswith('layer_with_weights' ):
depth += 1
else:
break
layer_depth.append(_SCREAMING_SNAKE_CASE )
# read data
lowercase__ = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
names.append('/'.join(_SCREAMING_SNAKE_CASE ) )
arrays.append(_SCREAMING_SNAKE_CASE )
logger.info(F"""Read a total of {len(_SCREAMING_SNAKE_CASE ):,} layers""" )
# Sanity check
if len(set(_SCREAMING_SNAKE_CASE ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE ) )})""" )
lowercase__ = list(set(_SCREAMING_SNAKE_CASE ) )[0]
if layer_depth != 1:
raise ValueError(
'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
' heads.' )
# convert layers
logger.info('Converting weights...' )
for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = full_name.split('/' )
lowercase__ = model
lowercase__ = []
for i, m_name in enumerate(_SCREAMING_SNAKE_CASE ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('layer_with_weights' ):
lowercase__ = int(m_name.split('-' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['embeddings', 'LayerNorm'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'embeddings' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'encoder' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'layer' )
lowercase__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['pooler', 'dense'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'pooler' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
elif m_name == "embeddings":
trace.append('embeddings' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'embeddings' )
if layer_num == 0:
trace.append('word_embeddings' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'word_embeddings' )
elif layer_num == 1:
trace.append('position_embeddings' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'position_embeddings' )
elif layer_num == 2:
trace.append('token_type_embeddings' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'token_type_embeddings' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('weight' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'weight' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['attention', 'self'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'self' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['attention', 'output', 'LayerNorm'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['attention', 'output', 'dense'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
elif m_name == "_output_dense":
# output dense
trace.extend(['output', 'dense'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(['output', 'LayerNorm'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
elif m_name == "_key_dense":
# attention key
trace.append('key' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'key' )
elif m_name == "_query_dense":
# attention query
trace.append('query' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'query' )
elif m_name == "_value_dense":
# attention value
trace.append('value' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'value' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['intermediate', 'dense'] )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'intermediate' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('output' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('bias' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'bias' )
elif m_name in ["kernel", "gamma"]:
trace.append('weight' )
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'weight' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowercase__ = '.'.join(_SCREAMING_SNAKE_CASE )
if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , _SCREAMING_SNAKE_CASE ) or re.match(
R'(\S+)\.attention\.output\.dense\.weight' , _SCREAMING_SNAKE_CASE ):
lowercase__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase__ = array.transpose()
if pointer.shape == array.shape:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
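# Illustrative sketch, not part of the original converter: the pointer walk in
# the loop above is equivalent to resolving a dotted attribute path on the
# model, where numeric parts index into ModuleList-like containers.
def _demo_resolve_attr(root, dotted_path):
    from functools import reduce

    def step(obj, name):
        return obj[int(name)] if name.isdigit() else getattr(obj, name)

    # e.g. _demo_resolve_attr(model, "encoder.layer.0.attention.self.query.weight")
    return reduce(step, dotted_path.split("."), root)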
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
# Instantiate model
logger.info(F"""Loading model based on config from {config_path}...""" )
lowercase__ = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
lowercase__ = BertModel(_SCREAMING_SNAKE_CASE )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowercase_ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
lowercase_ = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
    lowercase__ = {b: str(i ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
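# Illustrative sketch, not part of the original script: the permute above for
# standard conv kernels converts the TF layout (H, W, in, out) to the PyTorch
# layout (out, in, H, W). Helper name and shapes are hypothetical.
def _demo_hwio_to_oihw():
    import torch

    tf_kernel = torch.zeros(3, 3, 16, 32)  # (H, W, in, out), as TF stores it
    torch_kernel = tf_kernel.permute(3, 2, 0, 1)  # -> (out, in, H, W)
    assert torch_kernel.shape == (32, 16, 3, 3)
    return torch_kernel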
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 45
| 0
|
import random
from typing import Any
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> list[Any]:
for _ in range(len(_SCREAMING_SNAKE_CASE ) ):
lowercase__ = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
lowercase__ = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
lowercase__ , lowercase__ = data[b], data[a]
return data
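# For comparison, a textbook (unbiased) Fisher-Yates sketch: walk from the end
# and swap each position with a uniformly chosen index at or before it. The
# function above instead performs n random transpositions, a related but
# different scheme. The helper name below is ours, not from the original file.
def fisher_yates_shuffle_classic(data):
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data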
if __name__ == "__main__":
lowercase_ = [0, 1, 2, 3, 4, 5, 6, 7]
lowercase_ = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
| 45
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
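# Illustrative aside (separate from the _LazyModule machinery above): a similar
# deferred-import effect can be sketched with a PEP 562 module-level
# __getattr__; kept commented out so it does not alter this module's behaviour.
#
#   import importlib
#
#   def __getattr__(name):
#       if name == "XCLIPModel":
#           return importlib.import_module(".modeling_x_clip", __name__).XCLIPModel
#       raise AttributeError(name)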
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
import json
import sys
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
lowercase__ = json.load(_SCREAMING_SNAKE_CASE )
lowercase__ = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ):
lowercase__ = results[benchmark_name]
lowercase__ = benchmark_name.split('/' )[-1]
output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
lowercase__ = '| metric |'
lowercase__ = '|--------|'
lowercase__ = '| new / old (diff) |'
for metric_name in sorted(_SCREAMING_SNAKE_CASE ):
lowercase__ = benchmark_res[metric_name]
lowercase__ = metric_vals['new']
lowercase__ = metric_vals.get('old' , _SCREAMING_SNAKE_CASE )
lowercase__ = metric_vals.get('diff' , _SCREAMING_SNAKE_CASE )
lowercase__ = F""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else 'None'
if old_val is not None:
val_str += F""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
if dif_val is not None:
val_str += F""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(_SCREAMING_SNAKE_CASE ) )
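# For reference (illustrative input, traced by hand through the code above):
# given {"bench/infer.json": {"latency_ms": {"new": 1.0, "old": 2.0, "diff": -1.0}}}
# the generated markdown contains:
#   ### Benchmark: infer.json
#   | metric | latency_ms |
#   |--------|---|
#   | new / old (diff) | 1.000000 / 2.000000 (-1.000000) |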
if __name__ == "__main__":
lowercase_ = sys.argv[1]
lowercase_ = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
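# Usage sketch (illustrative; the readable names follow the fixed-up definitions above):
#
#     is_arithmetic_series([2, 4, 6])   # True  -- constant difference of 2
#     is_arithmetic_series([2, 4, 7])   # False -- 4 - 2 != 7 - 4
#     arithmetic_mean([2, 4, 6])        # 4.0   -- (2 + 4 + 6) / 3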
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = (PNDMScheduler,)
_UpperCamelCase : Optional[int] = (('num_inference_steps', 50),)
def SCREAMING_SNAKE_CASE_ ( self : Any , **a : Any )-> Optional[Any]:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**a )
return config
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any]=0 , **a : int )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('num_inference_steps' , a )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**a )
lowercase__ = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
lowercase__ = scheduler_class.from_pretrained(a )
new_scheduler.set_timesteps(a )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(a , a , a , **a ).prev_sample
lowercase__ = new_scheduler.step_prk(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step_plms(a , a , a , **a ).prev_sample
lowercase__ = new_scheduler.step_plms(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : str , a : List[Any]=0 , **a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('num_inference_steps' , a )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
lowercase__ = scheduler_class.from_pretrained(a )
# copy over dummy past residuals
new_scheduler.set_timesteps(a )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(a , a , a , **a ).prev_sample
lowercase__ = new_scheduler.step_prk(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step_plms(a , a , a , **a ).prev_sample
lowercase__ = new_scheduler.step_plms(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **a : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**a )
lowercase__ = scheduler_class(**a )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(a )
for i, t in enumerate(scheduler.prk_timesteps ):
lowercase__ = model(a , a )
lowercase__ = scheduler.step_prk(a , a , a ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowercase__ = model(a , a )
lowercase__ = scheduler.step_plms(a , a , a ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop('num_inference_steps' , a )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**a )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , 'set_timesteps' ):
scheduler.set_timesteps(a )
elif num_inference_steps is not None and not hasattr(a , 'set_timesteps' ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step_prk(a , 0 , a , **a ).prev_sample
lowercase__ = scheduler.step_prk(a , 1 , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase__ = scheduler.step_plms(a , 0 , a , **a ).prev_sample
lowercase__ = scheduler.step_plms(a , 1 , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
"""simple docstring"""
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a )
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1 )
lowercase__ = scheduler_class(**a )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=a , beta_end=a )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
"""simple docstring"""
lowercase__ = 27
for scheduler_class in self.scheduler_classes:
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**a )
scheduler.set_timesteps(a )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowercase__ = scheduler.step_prk(a , a , a ).prev_sample
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
with self.assertRaises(a ):
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**a )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(a ) )
lowercase__ = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction' )
lowercase__ = torch.sum(torch.abs(a ) )
lowercase__ = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
lowercase__ = torch.sum(torch.abs(a ) )
lowercase__ = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
lowercase__ = torch.sum(torch.abs(a ) )
lowercase__ = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
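# A minimal standalone sketch (not one of the tests above) of driving a
# PNDMScheduler directly; the "model output" is random noise here, purely a
# stand-in so the PRK/PLMS stepping loop runs end to end.
def _pndm_usage_sketch() -> torch.Tensor:
    scheduler = PNDMScheduler(
        num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear'
    )
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 8, 8)  # stand-in for a UNet noise prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample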
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
    while i <= 100_000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
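# Quick sanity sketch (illustrative): for the straight line f(x) = x the arc
# length from 0 to 1 is sqrt(2), and each linear segment measures it exactly.
def _line_length_sanity_check() -> None:
    estimate = line_length(lambda x: x, 0.0, 1.0, 1_000)
    assert abs(estimate - math.sqrt(2)) < 1E-6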
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict) -> None:
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict) -> None:
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers', 'layers')] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace('subsample', 'conv')] = s_dict.pop(key)
def make_linear_from_emb(emb) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path) -> None:
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(conv_kernel_sizes) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowercase_ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
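# Example invocation (script name and paths are placeholders, not from the source):
#
#   python convert_speech_to_text_checkpoint.py \
#       --fairseq_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir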
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
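# Illustrative note (standard lazy-import behaviour, simplified): with the
# _LazyModule above, importing the package is cheap and heavy submodules are
# only materialised on first attribute access, e.g.
#
#     from transformers import SqueezeBertConfig  # resolved via _import_structure
#     config = SqueezeBertConfig()                # triggers the real import lazily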
class SCREAMING_SNAKE_CASE :  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)
    def count_islands(self) -> int:  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
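# Minimal usage sketch (grid and expected count are illustrative; method names
# follow the fixed-up class above): count 8-connected islands of 1s.
def _islands_example() -> int:
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    return SCREAMING_SNAKE_CASE(5, 5, grid).count_islands()  # -> 5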
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
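# Worked example (illustrative): for a row of length 5 the red (2), green (3)
# and blue (4) tiles can be placed in 7 + 3 + 2 = 12 ways, matching the worked
# example of Project Euler problem 116, so solution(5) == 12.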
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , a : Dict , a : str=13 , a : List[str]=7 , a : int=True , a : Tuple=True , a : str=True , a : List[str]=True , a : str=99 , a : str=32 , a : Tuple=5 , a : Dict=4 , a : Optional[Any]=37 , a : Dict="gelu" , a : Optional[int]=0.1 , a : Any=0.1 , a : Dict=512 , a : Any=16 , a : Tuple=2 , a : Optional[Any]=0.02 , a : Any=4 , )-> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : str = True
_UpperCamelCase : List[str] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
lowercase__ = FlaxRobertaModelTester(self )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('roberta-base' , from_pt=a )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset) -> None:
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + '\n'.join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""")
        print(*incompatible_ops, sep='\n')
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
lowercase_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
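# Example invocation (the script file name is a placeholder, not from the source):
#
#   python check_saved_model_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict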
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)
def main() -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(F"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(F"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(F"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
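# Illustrative extra (not in the original): NOR is functionally complete, so the
# other basic gates can be derived from nor_gate alone.
def not_gate(a: int) -> int:
    return nor_gate(a, a)
def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))
def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))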
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
from __future__ import annotations
def p_series(nth_term: int | str, power: int | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(F"""1 / {pow(temp + 1 , int(power) )}""" if series else '1')
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
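# Worked example (illustrative): p_series(5, 2) yields the first five terms of
# the p = 2 series:
#
#     ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']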
import math
def res(x, y) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = """Enter the base and the power separated by a comma: """
    xa, ya = map(int, input(prompt).split(""","""))
    xb, yb = map(int, input(prompt).split(""","""))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("""Largest number is""", xa, """^""", ya)
    elif resb > resa:
        print("""Largest number is""", xb, """^""", yb)
    else:
        print("""Both are equal""")
import random
def partition(a, left_index, right_index) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1
def quick_sort_random(a, left, right) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point
def main() -> None:
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
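# Deterministic usage sketch (illustrative; seeding only pins the pivot choices,
# the sorted result is the same either way):
def _quick_sort_example() -> list[int]:
    random.seed(0)
    data = [5, 3, 8, 1, 9, 2]
    quick_sort_random(data, 0, len(data))
    return data  # -> [1, 2, 3, 5, 8, 9]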
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["""w""", """k"""])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
# do nothing.
pass
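# Sanity sketch (illustrative): a horizontal three-cell "blinker" flips to a
# vertical one after a single generation under the rules in __judge_point.
def _blinker_check() -> bool:
    board = create_canvas(5)
    for j in (1, 2, 3):
        board[2][j] = True
    nxt = run(board)
    return [nxt[r][2] for r in (1, 2, 3)] == [True, True, True]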
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
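# Worked examples (illustrative):
#
#     decimal_to_any(0, 2)     # '0'
#     decimal_to_any(5, 4)     # '11'
#     decimal_to_any(255, 16)  # 'FF'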
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : int = 'distilbert'
_UpperCamelCase : int = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self : Optional[int] , a : Union[str, Any]=30_522 , a : List[Any]=512 , a : Optional[Any]=False , a : Any=6 , a : int=12 , a : int=768 , a : int=4 * 768 , a : List[Any]=0.1 , a : Dict=0.1 , a : List[str]="gelu" , a : Union[str, Any]=0.02 , a : Dict=0.1 , a : Optional[Any]=0.2 , a : Optional[int]=0 , **a : Tuple , )-> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = sinusoidal_pos_embds
lowercase__ = n_layers
lowercase__ = n_heads
lowercase__ = dim
lowercase__ = hidden_dim
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation
lowercase__ = initializer_range
lowercase__ = qa_dropout
lowercase__ = seq_classif_dropout
super().__init__(**a , pad_token_id=a )
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowercase__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
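# Illustrative sketch (assumed usage, mirroring other transformers OnnxConfig
# subclasses; the two classes above share an obfuscated name, so the readable
# names below are assumptions):
#
#     config = DistilBertConfig()
#     onnx_config = DistilBertOnnxConfig(config)
#     onnx_config.inputs  # OrderedDict of dynamic batch/sequence axes for export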
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img() -> Image.Image:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
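# A minimal standalone sketch (not one of the tests above) of end-to-end ViT
# image classification with the same public checkpoint:
def _vit_classification_sketch() -> str:
    processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
    model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
    inputs = processor(images=prepare_img(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]  # e.g. 'Egyptian cat'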
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , )-> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self )-> Optional[int]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self )-> Optional[int]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels )-> List[Any]:
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # the expected spatial size is image_size // 32: the stem downsamples by 4x and the
        # later stages add three more 2x reductions (standard BiT/ResNet layout)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels )-> List[str]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels )-> Union[str, Any]:
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self )-> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self )-> Optional[int]:
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self )-> List[str]:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self )-> List[str]:
        """simple docstring"""
        return
    @unittest.skip(reason='Bit does not output attentions' )
    def test_attention_outputs( self )-> Optional[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def test_inputs_embeds( self )-> Optional[int]:
        """simple docstring"""
        pass
    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def test_model_common_attributes( self )-> Dict:
        """simple docstring"""
        pass
    def test_forward_signature( self )-> Optional[Any]:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self )-> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self )-> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self )-> Union[str, Any]:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
    def test_hidden_states_output( self )-> List[Any]:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def test_feed_forward_chunking( self )-> int:
        """simple docstring"""
        pass
    def test_for_image_classification( self )-> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self )-> Optional[Any]:
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase ):
@cached_property
    def default_image_processor( self )-> Dict:
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self )-> Dict:
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self )-> Union[str, Any]:
        """simple docstring"""
        self.model_tester = BitModelTester(self )
def stooge_sort(arr: list ) -> list:
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge(arr: list , i: int , h: int ) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
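# A quick sanity check (hedged illustration, not part of the original module):
# stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5]. Stooge sort runs in
# O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is purely educational.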
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline ):
    def __init__( self , value_function : UNet1DModel , unet : UNet1DModel , scheduler : DDPMScheduler , env , )-> None:
        """simple docstring"""
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        """simple docstring"""
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        """simple docstring"""
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        """simple docstring"""
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_xa( self , x_in , cond , act_dim ):
        """simple docstring"""
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        """simple docstring"""
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
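    # Note on the loop above: each denoising step first nudges the trajectory `x` along the
    # gradient of the value function (classifier-style guidance, scaled by the posterior std),
    # then applies the regular diffusion-model update and re-imposes the initial-state condition.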
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        """simple docstring"""
        obs = self.normalize(obs , 'observations' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='actions' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
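    # Hedged usage sketch (assumes a d4rl-style `env` plus pretrained value-function and
    # UNet checkpoints with a DDPMScheduler; not part of the original module):
    #   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
    #   action = pipeline(env.reset(), planning_horizon=32)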
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric ):
    def _info( self )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False )-> Optional[int]:
        """simple docstring"""
        results = spearmanr(predictions , references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
lowercase_ = """sshleifer/student_marian_en_ro_6_1"""
lowercase_ = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus ):
    def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , )-> None:
"""simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , 'trainer_state.json' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'] , float )
            assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist( self )-> Any:
        """simple docstring"""
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def test_run_seqaseq_dp( self )-> int:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=False )
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp( self )-> Dict:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp( self )-> int:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp simple' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16( self )-> List[Any]:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp simple --fp16' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp( self )-> List[Any]:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=False )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16( self )-> int:
        """simple docstring"""
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=False )
    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex( self )-> Dict:
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True , extra_args_str='--fp16 --fp16_backend=apex' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica( self , experiment_id )-> int:
        """simple docstring"""
        experiments = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        log_info_string = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['extra_args_str'] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['n_matches'] )
@slow
    def test_run_seqaseq( self )-> List[str]:
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=10 , distributed=False , )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir , 'trainer_state.json' ) ).log_history
        eval_metrics = [log for log in logs if 'eval_loss' in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] , float )
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir )
        contents = {os.path.basename(p ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb( self )-> Dict:
        """simple docstring"""
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str ) -> Tuple[int, float]:
            extra_args = '--skip_memory_metrics 0'
            output_dir = self.run_trainer(
                max_len=128 , model_name=MARIAN_MODEL , learning_rate=3E-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , 'trainer_state.json' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            loss = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
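        # (Worked out: 25e6 params * (8 - 2) bytes ~= 143MiB of optimizer state saved,
        # hence the 120MB margin below.)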
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
        self.assertEqual(
            loss_orig , loss_bnb , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
    def run_trainer( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3E-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , )-> str:
        """simple docstring"""
        data_dir = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
            --num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
            --save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
lowercase__ = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
            --eval_steps {str(eval_steps )}
""".split()
        args_predict = '\n --do_predict\n '.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ['run_translation.py'] + args
            with patch.object(sys , 'argv' , testargs ):
                main()
return output_dir
def solution(length: int = 50 ) -> int:
lowercase__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
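# Sanity check (hedged): this is the Project Euler #114 counting recurrence, where blocks
# must be at least 3 units long; for a row of length 7 the expected count is 17, so
# solution(7) should return 17.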
if __name__ == "__main__":
print(f'''{solution() = }''')
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase ):
    def test_map_nested( self )-> Optional[Any]:
"""simple docstring"""
lowercase__ = {}
lowercase__ = []
lowercase__ = 1
lowercase__ = [1, 2]
lowercase__ = {'a': 1, 'b': 2}
lowercase__ = {'a': [1, 2], 'b': [3, 4]}
lowercase__ = {'a': {'1': 1}, 'b': 2}
lowercase__ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowercase__ = {}
lowercase__ = []
lowercase__ = 2
lowercase__ = [2, 3]
lowercase__ = {'a': 2, 'b': 3}
lowercase__ = {'a': [2, 3], 'b': [4, 5]}
lowercase__ = {'a': {'1': 2}, 'b': 3}
lowercase__ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
self.assertEqual(map_nested(a , a ) , a )
lowercase__ = 2
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
self.assertEqual(map_nested(a , a , num_proc=a ) , a )
lowercase__ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
lowercase__ = {'a': 2, 'b': 0, 'c': 2}
lowercase__ = {
'a': np.eye(2 ).astype(a ),
'b': np.zeros(3 ).astype(a ),
'c': np.ones(2 ).astype(a ),
}
self.assertEqual(map_nested(a , a , map_numpy=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(a , a , map_numpy=a , num_proc=a ) , a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(a , a , map_numpy=a , num_proc=a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(a ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , a , num_proc=a )
    def test_zip_dict( self )-> Any:
        """simple docstring"""
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 3, 'b': 4}
        d3 = {'a': 5, 'b': 6}
        expected_zip_dict_result = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment( self )-> int:
        """simple docstring"""
        class Foo:
            my_attr = 'bar'
        foo = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(foo , 'my_attr' , 'BAR' ):
            self.assertEqual(foo.my_attr , 'BAR' )
        self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {F"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
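# Note: `map_nested` only switches to a multiprocessing pool when the iterable has at least
# `parallel_min_length` (16 here) items, and `num_proc` is effectively capped at the iterable
# length, which is exactly what the parametrized cases above encode.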
class TempSeedTest(TestCase ):
    @require_tf
    def test_tensorflow( self )-> str:
"""simple docstring"""
        import tensorflow as tf
        from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
    def test_torch( self )-> Any:
"""simple docstring"""
        import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy( self )-> Tuple:
"""simple docstring"""
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1 , y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input_ ) == expected_output
    nested_input = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(nested_input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y='foo' )] )
def _split_text(text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out ) == 4
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
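# Hedged usage sketch: get_efficientnet_config("b0") returns a 1000-class EfficientNetConfig
# with image_size=224 and width/depth coefficients of 1.0, mirroring CONFIG_MAP above.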
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    block_names = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]
    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        # Convert TF weight layouts to PyTorch ones:
        #  - conv kernels: (H, W, in, out) -> (out, in, H, W)
        #  - depthwise kernels: (H, W, in, mult) -> (in, mult, H, W)
        #  - dense kernels: (in, out) -> (out, in)
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights='imagenet' , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation='softmax' , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors='pt' )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        model_name_hub = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name_hub )
        hf_model.push_to_hub(model_name_hub )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowercase__ = BertTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowercase__ = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowercase__ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowercase__ = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowercase__ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowercase__ = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
logger.info('Start encoding' )
logger.info(F"""{len(_SCREAMING_SNAKE_CASE )} examples to process.""" )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
for text in data:
lowercase__ = F"""{bos} {text.strip()} {sep}"""
lowercase__ = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
rslt.append(_SCREAMING_SNAKE_CASE )
iter += 1
if iter % interval == 0:
            end = time.time()
            logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
            start = time.time()
logger.info('Finished binarization' )
logger.info(F"""{len(_SCREAMING_SNAKE_CASE )} examples processed.""" )
lowercase__ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowercase__ = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
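    # Each encoded example is stored with the smallest dtype that can hold every token id:
    # np.uint16 covers ids below 2**16 (e.g. BERT's ~30k vocab), otherwise np.int32 is used.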
if __name__ == "__main__":
main()
import argparse
import json
import subprocess
def get_runner_status(target_runners , token ):
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('utf-8' )
    status = json.loads(o )
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    def list_str(values ):
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
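    # Example invocation (hypothetical file name, assuming a GitHub PAT with actions:read):
    #   python get_github_runner_status.py --target_runners runner-1,runner-2 --token <GH_TOKEN>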
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
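    # Note on the _LazyModule pattern above: submodules are only imported on first
    # attribute access, so e.g. `from transformers.models.bert import BertModel`
    # resolves BertModel through `_import_structure` instead of eagerly importing
    # every optional backend (torch, tf, flax) at package import time.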
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
snake_case_ = TapasConfig.from_json_file(lowercase__ )
# set absolute/relative position embeddings parameter
snake_case_ = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
snake_case_ = TapasForQuestionAnswering(config=lowercase__ )
elif task == "WTQ":
# run_task_main.py hparams
snake_case_ = 4
snake_case_ = True
# hparam_utils.py hparams
snake_case_ = 0.66_4694
snake_case_ = 0.20_7951
snake_case_ = 0.12_1194
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = 0.035_2513
snake_case_ = TapasForQuestionAnswering(config=lowercase__ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
snake_case_ = 4
snake_case_ = False
# hparam_utils.py hparams
snake_case_ = 36.4519
snake_case_ = 0.90_3421
snake_case_ = 222.088
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = 0.76_3141
snake_case_ = TapasForQuestionAnswering(config=lowercase__ )
elif task == "TABFACT":
snake_case_ = TapasForSequenceClassification(config=lowercase__ )
elif task == "MLM":
snake_case_ = TapasForMaskedLM(config=lowercase__ )
elif task == "INTERMEDIATE_PRETRAINING":
snake_case_ = TapasModel(config=lowercase__ )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase__ )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
snake_case_ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(lowercase__ )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
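    # Hypothetical invocation (checkpoint and output paths are placeholders, not
    # from the source); the tokenizer logic above expects the checkpoint path to
    # end in 'model.ckpt':
    #     python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
    #         --tf_checkpoint_path tapas_wtq/model.ckpt --tapas_config_file tapas_wtq/config.json \
    #         --pytorch_dump_path ./tapas-wtq-pytorch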
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=None , __UpperCamelCase=2 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 2
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFDeiTModel(config=__UpperCamelCase )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFDeiTForMaskedImageModeling(config=__UpperCamelCase )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = TFDeiTForMaskedImageModeling(__UpperCamelCase )
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.type_sequence_label_size
snake_case_ = TFDeiTForImageClassification(__UpperCamelCase )
snake_case_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = TFDeiTForImageClassification(__UpperCamelCase )
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__A = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFDeiTModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
"""simple docstring"""
snake_case_ = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFDeiTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def a():
'''simple docstring'''
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=__UpperCamelCase , return_tensors='tf' )
# forward pass
snake_case_ = model(**__UpperCamelCase )
# verify the logits
snake_case_ = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=[1, 2, 1] , __UpperCamelCase=[2, 2, 4] , __UpperCamelCase=2 , __UpperCamelCase=2.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=8 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
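        # Illustrative arithmetic (not from the source): with image_size=32, patch_size=2
        # and depths=[1, 2, 1], expected_seq_len = ((32 // 2) ** 2) // 4 ** 2 = 16 and
        # expected_dim = embed_dim * 2 ** 2, i.e. each Swin stage halves the token grid
        # while doubling the channel dimension.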
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = SwinvaForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.type_sequence_label_size
snake_case_ = SwinvaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__A = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
snake_case_ = len(self.model_tester.depths )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = config.window_size**2
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ = len(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
snake_case_ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# Swinv2 has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwinvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__UpperCamelCase )
snake_case_ = self.default_image_processor
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
snake_case_ = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**__UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
A = get_tests_dir('fixtures')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = mock.Mock()
snake_case_ = 5_00
snake_case_ = {}
snake_case_ = HTTPError
snake_case_ = {}
# Download this model to make sure it's in the cache.
snake_case_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
snake_case_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowerCAmelCase ( cls ):
"""simple docstring"""
snake_case_ = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def __lowerCAmelCase ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id='test-feature-extractor' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__UpperCamelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
snake_case_ = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
snake_case_ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 46
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
A = parser.parse_args()
A = 'cpu'
A = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
A = 'path-to-your-trained-model'
A = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A = pipe.to(device)
# to channels last
A = pipe.unet.to(memory_format=torch.channels_last)
A = pipe.vae.to(memory_format=torch.channels_last)
A = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A = torch.randn(2, 4, 64, 64)
A = torch.rand(1) * 999
A = torch.randn(2, 77, 768)
A = (sample, timestep, encoder_hidden_status)
try:
A = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
A = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
A = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
A = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
A = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
A = 666
A = torch.Generator(device).manual_seed(seed)
A = {'generator': generator}
if args.steps is not None:
A = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
A = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
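# Hypothetical invocation (the script name is a placeholder, not from the source):
#     python stable_diffusion_ipex.py --dpm --steps 20
# Note that 'path-to-your-trained-model' above must be replaced with a real
# checkpoint directory before the pipeline can load.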
from __future__ import annotations
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) < k or k < 0:
raise ValueError('Invalid Input' )
snake_case_ = snake_case_ = sum(array[:k] )
for i in range(len(lowercase__ ) - k ):
snake_case_ = current_sum - array[i] + array[i + k]
snake_case_ = max(lowercase__ , lowercase__ )
return max_sum
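# Worked example (illustrative, not part of the source): for array = [1, 2, 3, 4, 5]
# and k = 2, the window sums are 3, 5, 7, 9, so max_sum_in_array returns 9; each
# iteration updates the running sum in O(1) by dropping array[i] and adding array[i + k].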
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
A = [randint(-1000, 1000) for i in range(100)]
A = randint(0, 110)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """unispeech-sat"""
def __init__( self , __UpperCamelCase=32 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase="group" , __UpperCamelCase="gelu" , __UpperCamelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase=False , __UpperCamelCase=1_28 , __UpperCamelCase=16 , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.05 , __UpperCamelCase=10 , __UpperCamelCase=2 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=0 , __UpperCamelCase=3_20 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , __UpperCamelCase=1_00 , __UpperCamelCase=2_56 , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase="mean" , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=2_56 , __UpperCamelCase=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase=(5, 3, 3, 1, 1) , __UpperCamelCase=(1, 2, 3, 1, 1) , __UpperCamelCase=5_12 , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=2 , __UpperCamelCase=5_04 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
snake_case_ = num_clusters
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = feat_quantizer_dropout
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = xvector_output_dim
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
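# Illustrative arithmetic (not in the source): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), this property evaluates to 5 * 2**6 = 320, i.e. the
# convolutional feature encoder downsamples the raw waveform by 320 samples per frame.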
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
A = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = state_dict.pop(lowercase__ )
snake_case_ = val
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
snake_case_ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
snake_case_ = value
else:
snake_case_ = value
return new_state_dict
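# Illustrative example (not from the source): a checkpoint key such as
#     'backbone.0.body.layer1.0.conv1.weight'
# is rewritten by the loop above to
#     'backbone.conv_encoder.model.layer1.0.conv1.weight'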
def a(lowercase__ , lowercase__=False ):
'''simple docstring'''
snake_case_ = ''
if is_panoptic:
snake_case_ = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[:256, :]
snake_case_ = in_proj_bias[:256]
snake_case_ = in_proj_weight[256:512, :]
snake_case_ = in_proj_bias[256:512]
snake_case_ = in_proj_weight[-256:, :]
snake_case_ = in_proj_bias[-256:]
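        # The fused in_proj matrix has shape (768, 256) for d_model = 256: rows 0-255
        # hold the query projection, 256-511 the key projection and 512-767 the value
        # projection, which is exactly how the three slices above split it.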
def a():
'''simple docstring'''
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
snake_case_ = 'resnet101'
if "dc5" in model_name:
snake_case_ = True
snake_case_ = 'panoptic' in model_name
if is_panoptic:
snake_case_ = 250
else:
snake_case_ = 91
snake_case_ = 'huggingface/label-files'
snake_case_ = 'coco-detection-id2label.json'
snake_case_ = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(lowercase__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load image processor
snake_case_ = 'coco_panoptic' if is_panoptic else 'coco_detection'
snake_case_ = ConditionalDetrImageProcessor(format=lowercase__ )
# prepare image
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase__ , return_tensors='pt' )
snake_case_ = encoding['pixel_values']
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
snake_case_ = torch.hub.load('DeppMeng/ConditionalDETR' , lowercase__ , pretrained=lowercase__ ).eval()
snake_case_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
snake_case_ = 'conditional_detr.' + src
rename_key(lowercase__ , lowercase__ , lowercase__ )
snake_case_ = rename_backbone_keys(lowercase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase__ , is_panoptic=lowercase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case_ = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
snake_case_ = state_dict.pop(lowercase__ )
snake_case_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
snake_case_ = state_dict.pop(lowercase__ )
snake_case_ = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
snake_case_ = state_dict.pop(lowercase__ )
snake_case_ = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
snake_case_ = state_dict.pop(lowercase__ )
snake_case_ = val
# finally, create HuggingFace model and load state dict
snake_case_ = ConditionalDetrForSegmentation(lowercase__ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
model.push_to_hub(repo_id=lowercase__ , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
snake_case_ = conditional_detr(lowercase__ )
snake_case_ = model(lowercase__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
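    # Hypothetical invocation (the output path is a placeholder, not from the source):
    #     python convert_conditional_detr_checkpoint.py --model_name conditional_detr_resnet50 \
    #         --pytorch_dump_folder_path ./conditional-detr-resnet50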
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects that also tracks each node's position in
    `idx_of_element`, which makes decrease_key possible in O(log n).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # heapify every internal node, bottom-up
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
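# A minimal sketch of the remaining operations (peek / remove / insert / is_empty);
# the values noted in the comments assume the demo heap built above.
print('Smallest element:', my_min_heap.peek())  # Node(B, -17) after the decrease_key above
removed = my_min_heap.remove()  # pops the minimum and restores the heap property via sift_down
print('Removed:', removed, '| new minimum:', my_min_heap.peek())  # new minimum is Node(R, -1)
my_min_heap.insert(Node('Z', -100))  # sift_up bubbles the new node to the root
print('After insert:', my_min_heap.peek())  # Node(Z, -100)
print('Heap empty?', my_min_heap.is_empty())  # False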
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    # the vocab file is assumed to sit next to the checkpoint (the 'model.ckpt' suffix is 10 characters)
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
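# Example invocation (paths are illustrative; the checkpoint path must end in the
# 10-character suffix 'model.ckpt' so the vocab file can be derived from it):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/dump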
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
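# Note: _LazyModule defers the heavy torch/vision imports declared in _import_structure
# until the corresponding attribute is first accessed, so `import transformers` stays
# fast even when the optional backends are installed.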
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the huggingface CLIP processor so gradients can flow through the image
    preprocessing step: the stock processor converts to PIL images, which breaks
    backprop, while the tensor transforms used here are differentiable.
    """

    def __init__(self, device='cpu', clip_model='openai/clip-vit-large-patch14'):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding['pixel_values'] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
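# Gradient-flow sanity sketch (illustrative only, not part of the module): because
# the transforms above are plain tensor ops, gradients reach the input image.
#
#   processor = ProcessorGradientFlow(device='cpu')
#   img = torch.rand(1, 3, 256, 256, requires_grad=True)
#   enc = processor(text=['a cat'], images=img, return_tensors='pt', padding=True)
#   enc['pixel_values'].sum().backward()  # img.grad is now populated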
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val='image',
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """If `vqgan` (or `clip`) is not given, it is loaded from `vqgan_config`/`vqgan_checkpoint` (resp. from the hub)."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the saved intermediate frames into a gif."""
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*'))
        if not len(paths):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)')
        if len(paths) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)')
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames a bit longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png'):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError('Input either path or tensor')
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors='pt', padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'], image, weights=(1 / pos_prompts['weights']))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'], image, weights=neg_prompts['weights'])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print('CLIP loss', clip_loss)
            if self.log:
                wandb.log({'CLIP Loss': clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == 'image':
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project='face-editor')
        wandb.config.update({'Positive Prompts': positive_prompts})
        wandb.config.update({'Negative Prompts': negative_prompts})
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            # wandb.log expects a dict of named values
            wandb.log({'Original Image': wandb.Image(image)})

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split('|')]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ':' in prompt:
                processed_prompt, weight = prompt.split(':')
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            'prompts': processed_prompts,
            'weights': torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Initialize the latent from an image (if given) and optimize it under CLIP guidance."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts']))
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        else:
            save_path = save_path + '_' + get_timestamp()
            os.makedirs(save_path)
        self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print('Original Image')
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
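# Minimal usage sketch (file names are hypothetical; assumes local VQGAN weights):
#   editor = VQGAN_CLIP(vqgan_config='vqgan_imagenet_f16.yaml', vqgan_checkpoint='vqgan.ckpt', iterations=20)
#   editor.generate(pos_prompts='a smiling face:1.0', neg_prompts='glasses:0.5',
#                   image_path='face.png', show_intermediate=False, save_intermediate=True)
#   editor.make_animation(output_path='./face_edit.gif')  # stitches the saved iter_*.png frames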
def fizz_buzz(number, iterations):
    """Play FizzBuzz from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
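# Example: fizz_buzz(1, 15) returns
# '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '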
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta_start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
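# Typical local run against the diffusers test suite (path is illustrative):
#   python -m pytest -k DDIMParallelSchedulerTest tests/schedulers/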
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    # keep growing the query size until the index returns enough candidates
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", 'w') as f1, open(f"{class_data_dir}/urls.txt", 'w') as f2, open(
        f"{class_data_dir}/images.txt", 'w'
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate that the bytes decode to an image
                    with open(f"{class_data_dir}/images/{total}.jpg", 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
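# Example (queries the public LAION knn service, so network access is required;
# the output directory is illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200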
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length',
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length',
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Re-run the tests above with ftfy and spacy installed, which exercises the alternative tokenization path."""

    pass
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet TensorFlow's C++ logging if TF ends up being imported
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads the inputs for multiple choice.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors='pt',
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
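# Shape sketch: with a batch of 8 examples and 4 choices each, the collator receives
# 8 feature dicts whose values hold 4 sequences apiece, flattens them into 32
# sequences so `tokenizer.pad` can pad them jointly, then views the padded tensors
# back to (batch_size=8, num_choices=4, seq_len) and re-attaches the labels.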
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag',
            'regular',
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.'
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding='max_length' if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
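# Typical launch (hyperparameters are illustrative):
#   python run_swag.py --model_name_or_path bert-base-uncased --output_dir /tmp/swag_out \
#       --do_train --do_eval --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 3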
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = FlaxAutoModel.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack',
        ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '--pretrained_model_name_or_path',
        type=str,
        default=None,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models.',
    )
    parser.add_argument(
        '-c',
        '--caption',
        type=str,
        default='robotic cat with wings',
        help='Text used to generate images.',
    )
    parser.add_argument(
        '-n',
        '--images_num',
        type=int,
        default=4,
        help='How much images to generate.',
    )
    parser.add_argument(
        '-s',
        '--seed',
        type=int,
        default=42,
        help='Seed for random process.',
    )
    parser.add_argument(
        '-ci',
        '--cuda_id',
        type=int,
        default=0,
        help='cuda_id.',
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt='robotic cat with wings',
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # load the INT8 UNet exported by neural_compressor, if present
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
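# Example run (script name and paths are illustrative; 'best_model.pt' is picked up
# automatically from the model directory when present):
#   python text2images.py -m ./quantized-stable-diffusion -c "robotic cat with wings" -n 4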
# --- sample boundary (codestyle 46) ---
import operator as op
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = []
snake_case_ = lambda lowercase__ , lowercase__ : int(x / y ) # noqa: E731 integer division operation
snake_case_ = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(lowercase__ )) )
for x in post_fix:
        if x.isdigit(): # if x is a digit
stack.append(lowercase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
else:
snake_case_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
snake_case_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
stack.append(
str(opr[x](int(lowercase__ ) , int(lowercase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' , )
return int(stack[0] )
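# Worked example (the evaluator is exposed as `solve` below; operands must be
# non-negative integers because tokens are classified with str.isdigit):
#   solve('5 6 9 * +'.split())  ->  5 + (6 * 9) = 59
# Each push/pop is echoed as one row of the trace table printed above.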
if __name__ == "__main__":
A = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
# --- sample boundary (codestyle 46, label 1) ---
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
return abs(lowercase__ ) if a == 0 else greatest_common_divisor(b % a , lowercase__ )
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
    while y: # when y = 0 the loop terminates and x is returned as the final GCD
snake_case_ , snake_case_ = y, x % y
return abs(lowercase__ )
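# Quick sanity checks (names match the calls in main below; negative inputs
# are normalised with abs()):
#   greatest_common_divisor(24, 40)  ->  8
#   gcd_by_iterative(24, 40)         ->  8
#   gcd_by_iterative(-3, 9)          ->  3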
def a():
'''simple docstring'''
try:
snake_case_ = input('Enter two integers separated by comma (,): ' ).split(',' )
snake_case_ = int(nums[0] )
snake_case_ = int(nums[1] )
print(
f"""greatest_common_divisor({num_a}, {num_a}) = """
f"""{greatest_common_divisor(lowercase__ , lowercase__ )}""" )
print(f"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(lowercase__ , lowercase__ )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
# --- sample boundary (codestyle 46) ---
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
"""simple docstring"""
__A = """bit"""
__A = ["""preactivation""", """bottleneck"""]
__A = ["""SAME""", """VALID"""]
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[2_56, 5_12, 10_24, 20_48] , __UpperCamelCase=[3, 4, 6, 3] , __UpperCamelCase="preactivation" , __UpperCamelCase="relu" , __UpperCamelCase=None , __UpperCamelCase=32 , __UpperCamelCase=0.0 , __UpperCamelCase=False , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case_ = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = global_padding
snake_case_ = num_groups
snake_case_ = drop_path_rate
snake_case_ = embedding_dynamic_padding
snake_case_ = output_stride
snake_case_ = width_factor
snake_case_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
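# Construction sketch (assuming this config class is exported as BitConfig):
#   config = BitConfig(depths=[3, 4, 6, 3], layer_type='preactivation',
#                      global_padding='SAME', out_features=['stage4'])
#   config.stage_names  ->  ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
# Invalid choices fail fast: an unknown layer_type or padding strategy raises
# a ValueError before any model weights are built.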
# --- sample boundary (codestyle 46, label 1) ---
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__A = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__A = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__A = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__A = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__A = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__A = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__A = field(
default=1_0_0_0_0 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __A = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
__A = field(default="""cosine""" , metadata={"""help""": """Learning rate."""} )
__A = field(
default=7_5_0 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__A = field(
default=1_6 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__A = field(
default=__snake_case , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__A = field(default=5_0_0_0_0 , metadata={"""help""": """Maximum number of training steps."""} )
__A = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__A = field(default=1_0_2_4 , metadata={"""help""": """Sequence lengths used for training."""} )
__A = field(default=1 , metadata={"""help""": """Training seed."""} )
__A = field(
default=1_0_2_4 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__A = field(default=__snake_case , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__A = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__A = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__A = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__A = field(default=1_0_2_4 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__A = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__A = field(default=__snake_case , metadata={"""help""": """Number of workers used for code evaluation."""} )
__A = field(
default=__snake_case , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Sample from the language model's output distribution."""} )
__A = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__A = field(default=2_5_6 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__A = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__A = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__A = field(default=1_0 , metadata={"""help""": """Number of generations to run in parallel."""} )
__A = field(
default=2_0_0 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__A = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__A = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__A = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__A = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default=__snake_case , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__A = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__A = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__A = field(
default=1_0_0_0_0_0 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__A = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__A = field(
default=1_0_0_0 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__A = field(
default=1_0_0 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__A = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__A = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__A = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__A = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__A = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__A = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__A = field(default=2_0_0_0_0_0 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__A = field(
        default=3_2_7_6_8 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__A = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__A = field(default=__snake_case , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__A = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__A = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__A = field(default=__snake_case , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__A = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__A = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__A = field(default=__snake_case , metadata={"""help""": """Push saved tokenizer to the hub."""} )
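# All of the dataclasses above are designed for HfArgumentParser, so every
# field doubles as a CLI flag. A minimal sketch (assuming the first dataclass
# is exported as TrainingArguments):
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   (args,) = parser.parse_args_into_dataclasses()
#   print(args.learning_rate)  # 2e-4 unless --learning_rate is passed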
# --- sample boundary (codestyle 46) ---
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.dummy_uncond_unet
snake_case_ = DDIMScheduler()
snake_case_ = self.dummy_vq_model
snake_case_ = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type='numpy' ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type='numpy' , return_dict=__UpperCamelCase )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
snake_case_ = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type='numpy' ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
snake_case_ = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
# --- sample boundary (codestyle 46, label 1) ---
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = ["""image_processor""", """tokenizer"""]
__A = """ViltImageProcessor"""
__A = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ):
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __UpperCamelCase , )
snake_case_ = kwargs.pop('feature_extractor' )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__UpperCamelCase , __UpperCamelCase )
snake_case_ = self.image_processor
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = self.tokenizer(
text=__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , stride=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_overflowing_tokens=__UpperCamelCase , return_special_tokens_mask=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , return_length=__UpperCamelCase , verbose=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase , )
# add pixel_values + pixel_mask
snake_case_ = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase )
encoding.update(__UpperCamelCase )
return encoding
def __lowerCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCamelCase , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCamelCase , )
return self.image_processor
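# Usage sketch (assuming the class is exported as ViltProcessor; the
# checkpoint name is illustrative): a single call tokenizes the text and adds
# pixel_values / pixel_mask from the image processor.
#   from PIL import Image
#   processor = ViltProcessor.from_pretrained('dandelin/vilt-b32-finetuned-vqa')
#   inputs = processor(images=Image.open('cats.png'), text='How many cats?',
#                      return_tensors='pt')
#   sorted(inputs)  # input_ids, attention_mask, pixel_values, pixel_mask, ...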
# --- sample boundary (codestyle 46) ---
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = LEDConfig
__A = {}
__A = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=4 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
snake_case_ = prepare_led_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ = tf.concat(
[tf.zeros_like(__UpperCamelCase )[:, :-1], tf.ones_like(__UpperCamelCase )[:, -1:]] , axis=-1 , )
snake_case_ = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFLEDModel(config=__UpperCamelCase ).get_decoder()
snake_case_ = inputs_dict['input_ids']
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict['attention_mask'][:1, :]
snake_case_ = 1
# first forward pass
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
snake_case_ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
'''simple docstring'''
if attention_mask is None:
snake_case_ = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
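# Sketch of the defaulting rule above: attention_mask marks non-pad tokens,
# while decoder_attention_mask always keeps position 0 (the decoder start
# token) and masks padding elsewhere. With pad_token_id=1:
#   decoder_input_ids      = [[0, 5, 7, 1, 1]]
#   decoder_attention_mask -> [[1, 1, 1, 0, 0]]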
@require_tf
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__A = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__A = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A = True
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = tf.zeros_like(inputs_dict['attention_mask'] )
snake_case_ = 2
snake_case_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
snake_case_ = True
snake_case_ = self.model_tester.seq_length
snake_case_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__UpperCamelCase ):
snake_case_ = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__UpperCamelCase ):
snake_case_ = [t.numpy() for t in outputs.encoder_attentions]
snake_case_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def a(lowercase__ ):
'''simple docstring'''
return tf.constant(lowercase__ , dtype=tf.intaa )
A = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
snake_case_ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = prepare_led_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
snake_case_ = model(**__UpperCamelCase )[0]
snake_case_ = (1, 10_24, 7_68)
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-3 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
snake_case_ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = prepare_led_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
snake_case_ = model(**__UpperCamelCase )[0]
snake_case_ = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-3 , rtol=1E-3 )
# --- sample boundary (codestyle 46, label 1) ---
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
A = TypeVar('T')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
__A = 42 # Cache store of keys
__A = 42 # References of the keys in cache
__A = 1_0 # Maximum capacity of cache
def __init__( self , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = deque()
snake_case_ = set()
if not n:
snake_case_ = sys.maxsize
elif n < 0:
            raise ValueError('n should be an integer greater than or equal to 0.' )
else:
snake_case_ = n
def __lowerCAmelCase ( self , __UpperCamelCase ):
"""simple docstring"""
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
snake_case_ = self.dq_store.pop()
self.key_reference.remove(__UpperCamelCase )
else:
self.dq_store.remove(__UpperCamelCase )
self.dq_store.appendleft(__UpperCamelCase )
self.key_reference.add(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
for k in self.dq_store:
print(__UpperCamelCase )
def __repr__( self ):
"""simple docstring"""
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
A = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
# --- sample boundary (codestyle 46) ---
from collections import defaultdict
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = first_str.lower().strip()
snake_case_ = second_str.lower().strip()
# Remove whitespace
snake_case_ = first_str.replace(' ' , '' )
snake_case_ = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(lowercase__ ) != len(lowercase__ ):
return False
# Default values for count should be 0
snake_case_ = defaultdict(lowercase__ )
    # For each character in the input strings,
    # increment the count for the first string and decrement it for the second
for i in range(len(lowercase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
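# Illustrative cases (the function is exposed as `check_anagrams` below; case
# and whitespace are stripped before counting):
#   check_anagrams('Silent', 'Listen')                 ->  True
#   check_anagrams('New York Times', 'monkeys write')  ->  True
#   check_anagrams('rat', 'car')                       ->  False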
if __name__ == "__main__":
from doctest import testmod
testmod()
A = input('Enter the first string ').strip()
A = input('Enter the second string ').strip()
A = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
# --- sample boundary (codestyle 46, label 1) ---
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , __snake_case ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = load_tool('text-to-speech' )
self.tool.setup()
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = self.tool('hey' )
snake_case_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = self.tool('hey' )
snake_case_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
# --- sample boundary (codestyle 46) ---
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.dummy_uncond_unet
snake_case_ = ScoreSdeVeScheduler()
snake_case_ = ScoreSdeVePipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
sde_ve.to(__UpperCamelCase )
sde_ve.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__UpperCamelCase ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=__UpperCamelCase , return_dict=__UpperCamelCase )[
0
]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 'google/ncsnpp-church-256'
snake_case_ = UNetaDModel.from_pretrained(__UpperCamelCase )
snake_case_ = ScoreSdeVeScheduler.from_pretrained(__UpperCamelCase )
snake_case_ = ScoreSdeVePipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
sde_ve.to(__UpperCamelCase )
sde_ve.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = sde_ve(num_inference_steps=10 , output_type='numpy' , generator=__UpperCamelCase ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# --- sample boundary (codestyle 46, label 1) ---
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
snake_case_ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case_ = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowercase__ , output_all_encodings=lowercase__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowercase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case_ = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
snake_case_ = os.path.join(get_home_dir() , 'models' )
snake_case_ = _load_vocab(lowercase__ , lowercase__ , lowercase__ , cls=lowercase__ )
snake_case_ = nlp.model.BERTModel(
lowercase__ , len(lowercase__ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowercase__ , use_token_type_embed=lowercase__ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowercase__ , use_decoder=lowercase__ , )
original_bort.load_parameters(lowercase__ , cast_dtype=lowercase__ , ignore_extra=lowercase__ )
snake_case_ = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case_ = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowercase__ ),
}
snake_case_ = BertConfig.from_dict(lowercase__ )
snake_case_ = BertForMaskedLM(lowercase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowercase__ ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowercase__ , lowercase__ ):
snake_case_ = hf_param.shape
snake_case_ = to_torch(params[gluon_param] )
snake_case_ = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
snake_case_ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
snake_case_ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
snake_case_ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
snake_case_ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case_ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case_ = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case_ = layer.attention.self
snake_case_ = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
snake_case_ = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
snake_case_ = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
snake_case_ = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
snake_case_ = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
snake_case_ = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
snake_case_ = layer.attention.output
snake_case_ = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
snake_case_ = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
snake_case_ = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
snake_case_ = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
snake_case_ = layer.intermediate
snake_case_ = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
snake_case_ = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
snake_case_ = layer.output
snake_case_ = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
snake_case_ = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
snake_case_ = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
snake_case_ = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case_ = RobertaTokenizer.from_pretrained('roberta-base' )
snake_case_ = tokenizer.encode_plus(lowercase__ )['input_ids']
# Get gluon output
snake_case_ = mx.nd.array([input_ids] )
snake_case_ = original_bort(inputs=lowercase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase__ )
snake_case_ = BertModel.from_pretrained(lowercase__ )
hf_bort_model.eval()
snake_case_ = tokenizer.encode_plus(lowercase__ , return_tensors='pt' )
snake_case_ = hf_bort_model(**lowercase__ )[0]
snake_case_ = output_gluon[0].asnumpy()
snake_case_ = output_hf[0].detach().numpy()
snake_case_ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case_ = np.allclose(lowercase__ , lowercase__ , atol=1e-3 )
if success:
        print('✔️ Both models output the same tensors' )
else:
        print('❌ Both models do **NOT** output the same tensors' )
print('Absolute difference is:' , lowercase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
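# Example invocation (hypothetical file names and paths; both flags are
# required):
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/output_dir
# The final comparison tolerates an absolute difference of up to 1e-3 between
# the Gluon and Transformers outputs.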
# --- sample boundary (codestyle 46) ---
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
"""simple docstring"""
@register_to_config
def __init__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ):
"""simple docstring"""
super().__init__()
snake_case_ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
snake_case_ = torch.zeros(__UpperCamelCase , __UpperCamelCase )
else:
snake_case_ = None
snake_case_ = torch.nn.Parameter(__UpperCamelCase )
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
__A = 42
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=__UpperCamelCase , transformer=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , scheduler=__UpperCamelCase , learned_classifier_free_sampling_embeddings=__UpperCamelCase , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = len(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else 1
# get prompt text embeddings
snake_case_ = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
snake_case_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case_ = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate text embeddings for each generation per prompt
snake_case_ = prompt_embeds.repeat_interleave(__UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCamelCase , 1 , 1 )
else:
snake_case_ = [''] * batch_size
snake_case_ = text_input_ids.shape[-1]
snake_case_ = self.tokenizer(
__UpperCamelCase , padding='max_length' , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='pt' , )
snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = negative_prompt_embeds.shape[1]
snake_case_ = negative_prompt_embeds.repeat(1 , __UpperCamelCase , 1 )
snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , __UpperCamelCase , __UpperCamelCase = 1_00 , __UpperCamelCase = 5.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , ):
"""simple docstring"""
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case_ = len(__UpperCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" )
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ = self._encode_prompt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(__UpperCamelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
snake_case_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ = self.transformer.num_vector_embeds - 1
snake_case_ = torch.full(__UpperCamelCase , __UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'
f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
snake_case_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase , device=self.device )
snake_case_ = self.scheduler.timesteps.to(self.device )
snake_case_ = latents
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ = self.transformer(__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase ).sample
if do_classifier_free_guidance:
snake_case_ , snake_case_ = model_output.chunk(2 )
snake_case_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCamelCase , dim=1 , keepdim=__UpperCamelCase )
snake_case_ = self.truncate(__UpperCamelCase , __UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
snake_case_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(__UpperCamelCase , timestep=__UpperCamelCase , sample=__UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ = self.vqvae.config.vq_embed_dim
snake_case_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ = self.vqvae.quantize.get_codebook_entry(__UpperCamelCase , shape=__UpperCamelCase )
snake_case_ = self.vqvae.decode(__UpperCamelCase , force_not_quantize=__UpperCamelCase ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ , snake_case_ = torch.sort(__UpperCamelCase , 1 , descending=__UpperCamelCase )
snake_case_ = torch.exp(__UpperCamelCase )
snake_case_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ = torch.full_like(keep_mask[:, 0:1, :] , __UpperCamelCase )
snake_case_ = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ = keep_mask[:, :-1, :]
snake_case_ = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ = log_p_x_0.clone()
snake_case_ = -torch.inf # -inf = log(0)
return rv
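# Standalone sketch of the truncation rule above: probabilities are sorted in
# descending order, mass outside the leading `truncation_rate` is sent to
# log(0) (-inf), and the single most likely class always survives.
#   p = [0.6, 0.3, 0.1], truncation_rate = 0.7
#   cumsum = [0.6, 0.9, 1.0]  ->  raw keep mask = [True, False, False]
#   after prepending all_true and dropping the last entry: [True, True, False]
#   -> only the 0.1 entry is masked to -inf before the scheduler step.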
# --- sample boundary (codestyle 46, label 1) ---
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ = 77
snake_case_ = self.dummy_image.to(__UpperCamelCase )
snake_case_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCamelCase )
snake_case_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__UpperCamelCase , )
snake_case_ = output.images
snake_case_ = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_fp16(self):
snake_case_ = self.dummy_cond_unet
snake_case_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
snake_case_ = self.dummy_vae
snake_case_ = self.dummy_text_encoder
snake_case_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
snake_case_ = 77
snake_case_ = self.dummy_image.to(__UpperCamelCase )
# put models in fp16
snake_case_ = unet.half()
snake_case_ = vae.half()
snake_case_ = bert.half()
# make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , )
snake_case_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCamelCase )
snake_case_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = 'A painting of a squirrel eating a burger'
snake_case_ = torch.manual_seed(0 )
snake_case_ = alt_pipe(
[prompt] , generator=__UpperCamelCase , num_inference_steps=2 , output_type='np' , image=__UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case_ = init_image.resize((7_60, 5_04) )
snake_case_ = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCamelCase , output_type='np' , )
snake_case_ = output.images[0]
snake_case_ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
snake_case_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
snake_case_ = init_image.resize((7_68, 5_12) )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
snake_case_ = 'BAAI/AltDiffusion'
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
__UpperCamelCase , safety_checker=__UpperCamelCase , )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
pipe.enable_attention_slicing()
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=__UpperCamelCase , image=__UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=__UpperCamelCase , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
        # img2img is flaky across GPUs even in fp32, so compare against a loose max absolute error here
assert np.abs(expected_image - image ).max() < 1E-2
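
# A small standalone illustration of the tolerance check used in the slow test
# above: `np.abs(a - b).max()` is the max absolute error, which upper-bounds the
# mean absolute error, hence the deliberately loose 1e-2 threshold for flaky
# cross-GPU outputs. Illustrative only, not part of the test suite.
if __name__ == "__main__":
    expected = np.array([0.500, 0.520, 0.480])
    actual = np.array([0.505, 0.512, 0.487])
    diff = np.abs(expected - actual)
    print(diff.max())   # 0.008 -> passes the 1e-2 max-abs-error check
    print(diff.mean())  # ~0.0067 -> the mean is always <= the max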
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2
            # (a standalone sketch at the end of this file walks through the arithmetic).
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
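
# Standalone sketch of the spatial arithmetic exercised by test_hidden_states_output
# above: each of the 5 reported stages halves height and width, so with the tester's
# image_size of 32 the feature maps shrink 16 -> 8 -> 4 -> 2 -> 1, and the final
# divisor halved once more recovers the output_stride of 32. Illustrative only.
if __name__ == "__main__":
    image_size = 32
    divisor = 2
    for stage in range(5):
        print(f"stage {stage}: {image_size // divisor} x {image_size // divisor}")
        divisor *= 2
    print("output_stride:", divisor // 2)  # 32, matching the tester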
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # make sure the payload is a decodable image before saving it
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
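
# Example invocation (hypothetical prompt and output directory, assuming this
# script is saved as retrieve.py and `clip-retrieval` is installed):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200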
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=__snake_case ):
"""simple docstring"""
__A = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
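
# A minimal sketch of the dummy-object pattern used above, with made-up names:
# the metaclass intercepts class attribute access (e.g. `from_pretrained`) and the
# constructor raises, so importing an optional class never fails at import time,
# only when it is actually used without its backends installed.
if __name__ == "__main__":

    class _RequiresBackends(type):
        def __getattr__(cls, name):
            raise ImportError(f"{cls.__name__} requires torch, transformers and onnx.")

    class FakeOnnxPipeline(metaclass=_RequiresBackends):
        def __init__(self, *args, **kwargs):
            raise ImportError(f"{type(self).__name__} requires torch, transformers and onnx.")

    try:
        FakeOnnxPipeline()
    except ImportError as err:
        print(err)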
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
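
# Standalone sketch of the fused-QKV split performed above, with toy sizes: timm
# stores one (3 * hidden, hidden) projection matrix that is cut into equal
# query/key/value thirds, exactly like the slicing in read_in_q_k_v. Illustrative only.
if __name__ == "__main__":
    hidden = 4
    qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q = qkv_weight[:hidden, :]
    k = qkv_weight[hidden : hidden * 2, :]
    v = qkv_weight[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv_weight)
    print(q.shape, k.shape, v.shape)  # three (4, 4) blocks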
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # verify the conversion on a COCO sample image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
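
# Example invocation (hypothetical output directory, assuming this script is
# saved as convert_dino_to_pytorch.py):
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16 --base_model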
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
snake_case_ = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
                ],  # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case_ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case_ = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case_ = tf_top_k_top_p_filtering(__UpperCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case_ = output[output != -float('inf' )]
snake_case_ = tf.cast(
tf.where(tf.not_equal(__UpperCamelCase , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-12 )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 2
snake_case_ = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ),
) , jit_compile=__UpperCamelCase , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.model.generate(
input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2, 0], [1_02, 1_03]]
snake_case_ = [[1, 0], [1, 1]]
snake_case_ = DummyModel(model=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={'serving_default': dummy_model.serving} )
snake_case_ = tf.saved_model.load(__UpperCamelCase ).signatures['serving_default']
for batch_size in range(1 , len(__UpperCamelCase ) + 1 ):
snake_case_ = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case_ = serving_func(**__UpperCamelCase )['sequences']
snake_case_ = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 1
snake_case_ = 2
        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ),
) , jit_compile=__UpperCamelCase , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.model.generate(
input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , max_new_tokens=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , )
return {"sequences": outputs["sequences"]}
snake_case_ = [[2], [1_02, 1_03]]
snake_case_ = [[1], [1, 1]]
snake_case_ = DummyModel(model=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__UpperCamelCase , __UpperCamelCase , signatures={'serving_default': dummy_model.serving} )
snake_case_ = tf.saved_model.load(__UpperCamelCase ).signatures['serving_default']
for input_row in range(len(__UpperCamelCase ) ):
snake_case_ = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case_ = serving_func(**__UpperCamelCase )['sequences']
snake_case_ = test_model.generate(**__UpperCamelCase , max_new_tokens=__UpperCamelCase )
tf.debugging.assert_equal(__UpperCamelCase , __UpperCamelCase )
@slow
@require_tensorflow_text
def __lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=__UpperCamelCase )
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
def __lowerCAmelCase ( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.tokenizer.tokenize(__UpperCamelCase )
snake_case_ , snake_case_ = text.pad_model_inputs(
__UpperCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
snake_case_ = self.model.generate(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
return self.tokenizer.detokenize(__UpperCamelCase )
snake_case_ = CompleteSentenceTransformer()
snake_case_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
snake_case_ = complete_model(__UpperCamelCase )
snake_case_ = tf.keras.Model(__UpperCamelCase , __UpperCamelCase )
keras_model.save(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
snake_case_ = 14
snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 'Hello, my dog is cute and'
snake_case_ = tokenizer(__UpperCamelCase , return_tensors='tf' )
snake_case_ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
snake_case_ = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case_ = [6_38, 1_98]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
snake_case_ = model.generate(**__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
snake_case_ = 'Hugging Face is a technology company based in New York and Paris.'
snake_case_ = bart_tokenizer(__UpperCamelCase , return_tensors='tf' ).input_ids
snake_case_ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
snake_case_ = bart_model.generate(__UpperCamelCase ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
snake_case_ = bart_model.generate(__UpperCamelCase , foo='bar' ).numpy()
self.assertTrue(np.array_equal(__UpperCamelCase , __UpperCamelCase ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)
        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case_ = bart_model.generate(__UpperCamelCase ).numpy()
with self.assertRaises(__UpperCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__UpperCamelCase , foo='bar' )
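
# Standalone numpy sketch of the top-k/top-p filtering idea exercised in
# UtilsFunctionsTest above: keep at most top_k logits, stopping earlier once the
# kept probability mass reaches top_p, and mask the rest to -inf. Illustrative
# only, not the TF implementation under test.
if __name__ == "__main__":
    logits = np.array([2.0, 1.0, 0.5, -1.0])
    top_k, top_p = 3, 0.9
    order = np.argsort(logits)[::-1]
    probs = np.exp(logits[order]) / np.exp(logits[order]).sum()
    keep = np.zeros_like(logits, dtype=bool)
    mass = 0.0
    for rank, idx in enumerate(order):
        if rank >= top_k or mass >= top_p:
            break
        keep[idx] = True
        mass += probs[rank]
    print(np.where(keep, logits, -np.inf))  # [2.0, 1.0, 0.5, -inf]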
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels', which is the name the models of the
    # transformers library expect
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        # (a standalone sketch after this function shows the rounding arithmetic)
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
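
# Standalone sketch of the pad_to_multiple_of logic in collate_fn above: mixed
# precision kernels are fastest when padded lengths are multiples of 8 (fp16/bf16)
# or 16 (fp8), so the longest sequence in a batch is rounded up accordingly.
# Illustrative only.
if __name__ == "__main__":

    def padded_length(longest: int, multiple: int) -> int:
        return ((longest + multiple - 1) // multiple) * multiple

    print(padded_length(71, 8))   # 72
    print(padded_length(71, 16))  # 80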
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase__ ) == "1":
snake_case_ = 2
# Initialize accelerator
snake_case_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config['lr']
snake_case_ = int(config['num_epochs'] )
snake_case_ = int(config['seed'] )
snake_case_ = int(config['batch_size'] )
snake_case_ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
snake_case_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ = MAX_GPU_BATCH_SIZE
set_seed(lowercase__ )
snake_case_ , snake_case_ = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
snake_case_ = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ = model(**lowercase__ )
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case_ = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**lowercase__ )
snake_case_ = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase__ ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional
                    # samples (see the standalone sketch after this function)
snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
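
# Standalone sketch of the last-batch truncation in the "New Code" block above:
# distributed gathers pad the final batch so every process contributes the same
# number of rows, and the padded duplicates must be dropped before computing
# metrics. Illustrative only, with made-up sizes.
if __name__ == "__main__":
    dataset_size = 10
    samples_seen = 8                      # samples counted before the last batch
    gathered_last_batch = list(range(4))  # 4 gathered rows, 2 of them padding
    kept = gathered_last_batch[: dataset_size - samples_seen]
    print(kept)  # [0, 1]: only the first two rows are real samples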
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16)."
        " Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=8 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=36 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.get_config()
snake_case_ = 3_00
return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters,
    arranged differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dictionary entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
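

# Quick sanity checks for check_anagrams. The strings below are our own
# illustrative examples (not part of the original module); the expected
# results follow directly from the definition of an anagram.
assert check_anagrams('Silent', 'Listen') is True
assert check_anagrams('This is    a      string', 'Is     this a string') is True
assert check_anagrams('There', 'Their') is False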
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")

    print(f"""Building PyTorch model from configuration: {config}""")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
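
    # Example invocation (hypothetical paths, shown for illustration only):
    #
    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
    #       --task WTQ \
    #       --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --tapas_config_file /path/to/bert_config.json \
    #       --pytorch_dump_path /path/to/output
    #
    # Note that `tf_checkpoint_path[:-10] + 'vocab.txt'` in the conversion function
    # assumes the checkpoint filename ends in the 10 characters 'model.ckpt', with
    # vocab.txt sitting next to it in the same directory.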
def get_data(source_data):
    # transpose the rows of raw data into per-column lists
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists, weights):
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists):
    final_scores = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data, weights):
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
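

if __name__ == "__main__":
    # Minimal usage sketch (our own illustrative data): weight 1 marks a column
    # as "higher is better", weight 0 as "lower is better". Each row gets its
    # summed per-column score appended in place.
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))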
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, 'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError('The nodes number should be same as the number of coins')

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
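

# Worked example (our own illustration of the classic problem): a root holding
# 3 coins with two empty children needs two moves, one coin pushed down each edge.
example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_tree) == 2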
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
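
# Example command line (assuming this sketch is saved as stable_diffusion_ipex.py
# and `model_id` above points at a real fine-tuned pipeline):
#
#   python stable_diffusion_ipex.py --dpm --steps 20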
import math


def sieve(n):
    """Segmented sieve of Eratosthenes."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
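
# Cross-check against simple trial division on a small range (our own sanity
# check, not part of the original script):
def _is_prime(k):
    return k >= 2 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [k for k in range(2, 101) if _is_prime(k)]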
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = 'unispeech-sat'

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm='group',
        feat_extract_activation='gelu',
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction='mean',
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
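
# Quick illustration (our own): with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples the raw waveform by
# 5 * 2**6 = 320, so the property above evaluates to 320:
#
#   config = UniSpeechSatConfig()
#   assert config.inputs_to_logits_ratio == 320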
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')


def get_hash(example):
    """Get hash of content field."""
    return {'hash': hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {'line_mean': np.mean(line_lengths), 'line_max': max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {'alpha_frac': alpha_frac}
def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove if true."""
    if example['hash'] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {'autogenerated': True}
    else:
        return {'autogenerated': False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by looking for keywords
    in the first few lines and counting occurrences of 'config' and 'test'."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {'config_or_test': True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {'config_or_test': True}
    return {'config_or_test': False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {'has_no_keywords': False}
    return {'has_no_keywords': True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' fewer than `minimum` times."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {'has_few_assignments': False}
    return {'has_few_assignments': True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {'ratio': ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
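

# Illustrative call on a toy record (our own example; real `example` dicts come
# from the loaded dataset, and `tokenizer` must already be defined at module scope):
#
#   preprocess({'content': 'def add(a, b):\n    return a + b\n'})
#   # -> dict with keys: hash, line_mean, line_max, alpha_frac, ratio,
#   #    autogenerated, config_or_test, has_no_keywords, has_few_assignments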
def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example['autogenerated']:
        return False
    elif example['line_max'] > args.line_max:
        return False
    elif example['line_mean'] > args.line_mean:
        return False
    elif example['alpha_frac'] < args.alpha_frac:
        return False
    elif example['ratio'] < args.min_token_ratio:
        return False
    elif example['config_or_test'] and np.random.rand() <= args.filter_proba:
        return False
    elif example['has_no_keywords'] and np.random.rand() <= args.filter_proba:
        return False
    elif example['has_few_assignments']:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicated dataset: {len(ds_filter)}""")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    # this is called with the [] operator
    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), 'newValue must be less than current value'
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)

print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
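

# Numeric sanity check for the two helpers above (our own illustration):
# softmax rows sum to one and sigmoid maps 0 to 0.5.
#
#   >>> sigmoid(np.array(0.0))
#   0.5
#   >>> softmax(np.array([[1.0, 3.0]])).sum(axis=-1)
#   array([1.])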
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k='', **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.',
                UserWarning,
            )
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params['function_to_apply'] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x['score'], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(100, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += ' '
    return out
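

# Example (our own; the classic expected output for 1..15, note the trailing space):
#
#   fizz_buzz(1, 15)
#   -> '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '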
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'size' ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 46
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def a(lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
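    # Over-request from the LAION index by a factor of 1.5 so enough candidates survive dead URLs and failed downloads.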
snake_case_ = 1.5
snake_case_ = int(factor * num_class_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowercase__ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case_ = client.query(text=lowercase__ )
if len(lowercase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
snake_case_ = int(factor * num_images )
snake_case_ = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=lowercase__ , aesthetic_weight=0.1 , )
snake_case_ = 0
snake_case_ = 0
snake_case_ = tqdm(desc='downloading real regularization images' , total=lowercase__ )
    with open(f"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(f"""{class_data_dir}/urls.txt""" , 'w' ) as fb, open(
        f"""{class_data_dir}/images.txt""" , 'w' ) as fc:
while total < num_class_images:
snake_case_ = class_images[count]
count += 1
try:
snake_case_ = requests.get(images['url'] )
if img.status_code == 200:
snake_case_ = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fb.write(images['url'] + '\n' )
                    fc.write(f"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def a():
'''simple docstring'''
snake_case_ = argparse.ArgumentParser('' , add_help=lowercase__ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=lowercase__ , type=lowercase__ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=200 , type=lowercase__ )
return parser.parse_args()
if __name__ == "__main__":
A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 46
| 1
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
A = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def a(lowercase__ ):
'''simple docstring'''
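    # Rewrite each checkpoint key in place, substituting every WHISPER_MAPPING fragment with its Transformers name.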
snake_case_ = list(s_dict.keys() )
for key in keys:
snake_case_ = key
for k, v in WHISPER_MAPPING.items():
if k in key:
snake_case_ = new_key.replace(lowercase__ , lowercase__ )
print(f"""{key} -> {new_key}""" )
snake_case_ = s_dict.pop(lowercase__ )
return s_dict
def a(lowercase__ ):
'''simple docstring'''
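    # Build a bias-free Linear layer whose weights are tied to the token-embedding matrix (used as the LM output projection).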
snake_case_ , snake_case_ = emb.weight.shape
snake_case_ = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
snake_case_ = emb.weight.data
return lin_layer
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
os.makedirs(lowercase__ , exist_ok=lowercase__ )
snake_case_ = os.path.basename(lowercase__ )
snake_case_ = url.split('/' )[-2]
snake_case_ = os.path.join(lowercase__ , lowercase__ )
if os.path.exists(lowercase__ ) and not os.path.isfile(lowercase__ ):
raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
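    # Reuse a cached download only when its SHA256 digest matches the checksum embedded in the URL path.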
if os.path.isfile(lowercase__ ):
snake_case_ = open(lowercase__ , 'rb' ).read()
if hashlib.shaaaa(lowercase__ ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowercase__ ) as source, open(lowercase__ , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=lowercase__ , unit_divisor=1024 ) as loop:
while True:
snake_case_ = source.read(8192 )
if not buffer:
break
output.write(lowercase__ )
loop.update(len(lowercase__ ) )
snake_case_ = open(lowercase__ , 'rb' ).read()
if hashlib.shaaaa(lowercase__ ).hexdigest() != expected_shaaaa:
raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
return model_bytes
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
snake_case_ = _download(_MODELS[checkpoint_path] )
else:
snake_case_ = torch.load(lowercase__ , map_location='cpu' )
snake_case_ = original_checkpoint['dims']
snake_case_ = original_checkpoint['model_state_dict']
snake_case_ = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(lowercase__ )
rename_keys(lowercase__ )
snake_case_ = True
snake_case_ = state_dict['decoder.layers.0.fc1.weight'].shape[0]
snake_case_ = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=lowercase__ , decoder_ffn_dim=lowercase__ , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
snake_case_ = WhisperForConditionalGeneration(lowercase__ )
snake_case_ , snake_case_ = model.model.load_state_dict(lowercase__ , strict=lowercase__ )
if len(lowercase__ ) > 0 and not set(lowercase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
snake_case_ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
snake_case_ = proj_out_weights
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
A = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 46
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 46
| 1
|
def a(lowercase__=28123 ):
'''simple docstring'''
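    # Project Euler 23: sieve proper-divisor sums up to the limit (28123 is the proven bound),
    # then total every n that cannot be written as the sum of two abundant numbers.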
snake_case_ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
snake_case_ = set()
snake_case_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(lowercase__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 46
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
A = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A = field(
default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__A = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = field(default=__snake_case , metadata={"""help""": """The input training data file (a text file)."""} )
__A = field(
default=__snake_case , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__A = field(
default=__snake_case , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__A = field(
default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__A = field(
default=__snake_case , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
if self.train_file is not None:
snake_case_ = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
snake_case_ = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = 42
__A = True
__A = None
__A = None
def __call__( self , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = 'label' if 'label' in features[0].keys() else 'labels'
snake_case_ = [feature.pop(__UpperCamelCase ) for feature in features]
snake_case_ = len(__UpperCamelCase )
snake_case_ = len(features[0]['input_ids'] )
snake_case_ = [
[{k: v[i] for k, v in feature.items()} for i in range(__UpperCamelCase )] for feature in features
]
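        # Flatten (batch_size, num_choices) features into one list so the tokenizer pads everything in a single call.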
snake_case_ = list(chain(*__UpperCamelCase ) )
snake_case_ = self.tokenizer.pad(
__UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
snake_case_ = {k: v.view(__UpperCamelCase , __UpperCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
snake_case_ = torch.tensor(__UpperCamelCase , dtype=torch.intaa )
return batch
def a():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowercase__ , lowercase__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case_ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
snake_case_ = {}
if data_args.train_file is not None:
snake_case_ = data_args.train_file
if data_args.validation_file is not None:
snake_case_ = data_args.validation_file
snake_case_ = data_args.train_file.split('.' )[-1]
snake_case_ = load_dataset(
lowercase__ , data_files=lowercase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
snake_case_ = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
snake_case_ = [f"""ending{i}""" for i in range(4 )]
snake_case_ = 'sent1'
snake_case_ = 'sent2'
if data_args.max_seq_length is None:
snake_case_ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
snake_case_ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
snake_case_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowercase__ ):
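        # Duplicate each context four times, once per candidate ending, so every (context, ending) pair becomes its own sequence.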
snake_case_ = [[context] * 4 for context in examples[context_name]]
snake_case_ = examples[question_header_name]
snake_case_ = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(lowercase__ )
]
# Flatten out
snake_case_ = list(chain(*lowercase__ ) )
snake_case_ = list(chain(*lowercase__ ) )
# Tokenize
snake_case_ = tokenizer(
lowercase__ , lowercase__ , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowercase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
snake_case_ = raw_datasets['train']
if data_args.max_train_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_train_samples )
snake_case_ = train_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
snake_case_ = train_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
snake_case_ = raw_datasets['validation']
if data_args.max_eval_samples is not None:
snake_case_ = min(len(lowercase__ ) , data_args.max_eval_samples )
snake_case_ = eval_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
snake_case_ = eval_dataset.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
snake_case_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowercase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowercase__ ):
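        # Multiple-choice accuracy: argmax over the choice logits compared against the gold labels.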
snake_case_ , snake_case_ = eval_predictions
snake_case_ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
snake_case_ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , compute_metrics=lowercase__ , )
# Training
if training_args.do_train:
snake_case_ = None
if training_args.resume_from_checkpoint is not None:
snake_case_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case_ = last_checkpoint
snake_case_ = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case_ = train_result.metrics
snake_case_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('train' , lowercase__ )
trainer.save_metrics('train' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
snake_case_ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('eval' , lowercase__ )
trainer.save_metrics('eval' , lowercase__ )
snake_case_ = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def a(lowercase__ ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 46
| 1
|
from __future__ import annotations
from typing import TypedDict
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = 42
__A = 42
def a(lowercase__ ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter s type must be str.' )
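    # Every cyclic rotation of s; the Burrows-Wheeler transform is derived from these rotations in sorted order.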
return [s[i:] + s[:i] for i in range(len(lowercase__ ) )]
def a(lowercase__ ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
snake_case_ = all_rotations(lowercase__ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
snake_case_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowercase__ ),
}
return response
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
snake_case_ = int(lowercase__ )
except ValueError:
raise TypeError(
            'The parameter idx_original_string type must be int or castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(lowercase__ ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
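    # Rebuild the sorted rotation table: prepend the BWT column and re-sort, len(bwt_string)
    # times; the row at idx_original_string is then the original string.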
snake_case_ = [''] * len(lowercase__ )
for _ in range(len(lowercase__ ) ):
for i in range(len(lowercase__ ) ):
snake_case_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
A = 'Provide a string that I will generate its BWT transform: '
A = input(entry_msg).strip()
A = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
A = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
| 46
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """Wav2Vec2FeatureExtractor"""
__A = """AutoTokenizer"""
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
super().__init__(__UpperCamelCase , __UpperCamelCase )
snake_case_ = self.feature_extractor
snake_case_ = False
@classmethod
def __lowerCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
try:
return super().from_pretrained(__UpperCamelCase , **__UpperCamelCase )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , __UpperCamelCase , )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
snake_case_ = WavaVecaCTCTokenizer.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
return cls(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
def __call__( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
snake_case_ = kwargs.pop('raw_speech' )
else:
snake_case_ = kwargs.pop('audio' , __UpperCamelCase )
snake_case_ = kwargs.pop('sampling_rate' , __UpperCamelCase )
snake_case_ = kwargs.pop('text' , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
snake_case_ = args[0]
snake_case_ = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
snake_case_ = self.feature_extractor(__UpperCamelCase , *__UpperCamelCase , sampling_rate=__UpperCamelCase , **__UpperCamelCase )
if text is not None:
snake_case_ = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case_ = encodings['input_ids']
return inputs
def __lowerCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*__UpperCamelCase , **__UpperCamelCase )
snake_case_ = kwargs.pop('input_features' , __UpperCamelCase )
snake_case_ = kwargs.pop('labels' , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
snake_case_ = args[0]
snake_case_ = args[1:]
if input_features is not None:
snake_case_ = self.feature_extractor.pad(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if labels is not None:
snake_case_ = self.tokenizer.pad(__UpperCamelCase , **__UpperCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
snake_case_ = labels['input_ids']
return input_features
def __lowerCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def __lowerCAmelCase ( self ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
snake_case_ = True
snake_case_ = self.tokenizer
yield
snake_case_ = self.feature_extractor
snake_case_ = False
| 46
|
import operator as op
def a(lowercase__ ):
'''simple docstring'''
snake_case_ = []
snake_case_ = lambda lowercase__ , lowercase__ : int(x / y ) # noqa: E731 integer division operation
snake_case_ = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(lowercase__ )) )
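    # Scan tokens left to right: operands are pushed; an operator pops two operands, applies the operation, and pushes the result.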
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(lowercase__ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
else:
snake_case_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
snake_case_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' )
stack.append(
str(opr[x](int(lowercase__ ) , int(lowercase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(lowercase__ ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
A = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 46
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
"""simple docstring"""
__A = """bit"""
__A = ["""preactivation""", """bottleneck"""]
__A = ["""SAME""", """VALID"""]
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[2_56, 5_12, 10_24, 20_48] , __UpperCamelCase=[3, 4, 6, 3] , __UpperCamelCase="preactivation" , __UpperCamelCase="relu" , __UpperCamelCase=None , __UpperCamelCase=32 , __UpperCamelCase=0.0 , __UpperCamelCase=False , __UpperCamelCase=32 , __UpperCamelCase=1 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
snake_case_ = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = global_padding
snake_case_ = num_groups
snake_case_ = drop_path_rate
snake_case_ = embedding_dynamic_padding
snake_case_ = output_stride
snake_case_ = width_factor
snake_case_ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
| 46
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['PerceiverFeatureExtractor']
A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 46
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.dummy_uncond_unet
snake_case_ = DDIMScheduler()
snake_case_ = self.dummy_vq_model
snake_case_ = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type='numpy' ).images
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type='numpy' , return_dict=__UpperCamelCase )[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
snake_case_ = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = torch.manual_seed(0 )
snake_case_ = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type='numpy' ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case_ = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
snake_case_ = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 46
| 1
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
A = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
A = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
for attribute in key.split('.' ):
snake_case_ = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
snake_case_ = getattr(lowercase__ , lowercase__ ).shape
else:
snake_case_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.feature_extractor
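    # Walk the fairseq state dict: conv feature-extractor tensors are loaded layer by layer; everything else is renamed via MAPPING and copied across.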
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(lowercase__ )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , lowercase__ )
if "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
snake_case_ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = 'weight'
else:
snake_case_ = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def a(lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
# load the pre-trained checkpoints
snake_case_ = torch.load(lowercase__ )
snake_case_ = WavLMConfigOrig(checkpoint['cfg'] )
snake_case_ = WavLMOrig(lowercase__ )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
snake_case_ = WavLMConfig.from_pretrained(lowercase__ )
else:
snake_case_ = WavLMConfig()
snake_case_ = WavLMModel(lowercase__ )
recursively_load_weights(lowercase__ , lowercase__ )
hf_wavlm.save_pretrained(lowercase__ )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 46
|
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = LEDConfig
__A = {}
__A = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=4 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
    def test_saved_model_creation(self):
        """simple docstring"""
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for TF LED
        pass
def _long_tensor(tok_lst):
    '''simple docstring'''
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    def test_inference_no_head(self):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        """simple docstring"""
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        """simple docstring"""
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """simple docstring"""

    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'}
    )
def collate_fn(examples):
    '''simple docstring'''
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {"pixel_values": pixel_values}
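
# Shape note (illustrative): with a per-device batch size of 8 and 224x224 RGB inputs,
# the returned "pixel_values" tensor has shape (8, 3, 224, 224).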
def main():
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"""
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
    if model_args.config_overrides is not None:
        logger.info(f"""Overriding config: {model_args.config_overrides}""")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"""New config: {config}""")
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
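    # Worked example: base_learning_rate=1e-3 with per-device batch 32, gradient accumulation 2
    # and 4 processes gives total_train_batch_size = 32 * 2 * 4 = 256, hence learning_rate = 1e-3.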
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding character slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
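
# Examples (illustrative):
#   check_anagrams('Silent', 'Listen')                      -> True
#   check_anagrams('This is a string', 'Is this a string')  -> True
#   check_anagrams('There', 'Their')                        -> False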
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        """simple docstring"""
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        """simple docstring"""
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs(self):
        """simple docstring"""
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference(self):
        """simple docstring"""
        model_id = 'google/ncsnpp-church-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
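
# Minimal usage sketch (illustrative values, not the 2.7b defaults):
#   from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel
#   config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
#   model = GPTNeoXJapaneseModel(config)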
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        """simple docstring"""
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """simple docstring"""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}"""
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt',
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}."""
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive)."""
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """simple docstring"""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
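
# Minimal usage sketch (illustrative; assumes the public VQ-Diffusion checkpoint is available):
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq')
#   image = pipe('a teddy bear playing in the pool', num_inference_steps=100).images[0]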
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    '''simple docstring'''
    return line.startswith(indent) or len(line) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$', line) is not None
def find_code_in_diffusers(object_name):
    '''simple docstring'''
    parts = object_name.split('.')
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")

    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py"""), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent(code):
    '''simple docstring'''
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R'^(\s*)\S', lines[idx]).groups()[0]
    return ""
def blackify(code):
    '''simple docstring'''
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n') :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    '''simple docstring'''
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""")
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    '''simple docstring'''
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.'
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
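    # Typical invocations (from the repository root):
    #   python utils/check_copies.py                      # report copy inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite out-of-date copies in place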
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """simple docstring"""

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self ):
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels , pixel_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTModel,
            """image-classification""": MobileViTForImageClassification,
            """image-segmentation""": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # The four flag names below are assumed for the obfuscated `__A = False` attributes.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileViT does not use inputs_embeds' )
    def test_inputs_embeds(self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViT does not support input and output embeddings' )
    def test_model_common_attributes(self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='MobileViT does not output attentions' )
    def test_attention_outputs(self ):
        """simple docstring"""
        pass
    def test_forward_signature(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_multi_gpu_data_parallel_forward(self ):
        """simple docstring"""
        pass
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output(self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_semantic_segmentation(self ):
        """simple docstring"""
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_post_processing_semantic_segmentation(self ):
        """simple docstring"""
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
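# Quick usage sketch for the checkpoints exercised above (kept as comments:
# running it needs torch, Pillow and network access; every name below comes
# straight from the tests, nothing new is assumed):
#     processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small')
#     model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small')
#     inputs = processor(images=prepare_img(), return_tensors='pt')
#     predicted_class = model(**inputs).logits.argmax(-1)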
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
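if __name__ == "__main__":
    # Worked example with assumed, textbook silicon values (concentrations in cm^-3):
    # V = kT/q * ln((1e17 * 1e17) / (1.5e10)**2) ≈ 0.0259 V * 31.4 ≈ 0.81 V at T = 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))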
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
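# Minimal sketch (an assumption, not the real library implementation) of the
# mechanism the dummy classes above rely on: a metaclass whose classes raise an
# informative ImportError, via `requires_backends`, as soon as they are used
# without the optional dependencies installed.
class DummyObjectSketch(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}")

class NeedsOnnx(metaclass=DummyObjectSketch):
    _backends = ['torch', 'transformers', 'onnx']

# NeedsOnnx() would raise:
# ImportError: NeedsOnnx requires the following backends: torch, transformers, onnx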
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    """simple docstring"""
    def __init__( self , p_stop=0.01 , max_length=10_00 ):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def check_batch_sampler_shards(self , batch_sampler , expected , split_batches=False , even_batches=True ):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
    def test_batch_sampler_shards_with_no_splits(self ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
    def test_batch_sampler_shards_with_splits(self ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
    def test_batch_sampler_shards_with_no_splits_no_even(self ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
    def test_batch_sampler_shards_with_splits_no_even(self ):
"""simple docstring"""
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
snake_case_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
snake_case_ = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
    def test_batch_sampler_with_varying_batch_sizes(self ):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler , 2 , i , even_batches=False ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def check_iterable_dataset_shards(self , dataset , seed , batch_size , drop_last=False , num_processes=2 , split_batches=False ):
        """simple docstring"""
        random.seed(seed )
        reference = list(dataset )
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset , batch_size=batch_size , drop_last=drop_last , num_processes=num_processes , process_index=i , split_batches=split_batches , )
            for i in range(num_processes )
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed )
            iterable_dataset_lists.append(list(iterable_dataset_shard ) )
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l ) , len(first_list ) )
            self.assertTrue(len(l ) % shard_batch_size == 0 )
        observed = []
        for idx in range(0 , len(first_list ) , shard_batch_size ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference ) < len(observed ):
                reference += reference
        self.assertListEqual(observed , reference[: len(observed )] )
    def test_iterable_dataset_shard(self ):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=False )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=False , split_batches=True )
        self.check_iterable_dataset_shards(dataset , seed , batch_size=4 , drop_last=True , split_batches=True )
    def test_skip_batch_sampler(self ):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16 ) , batch_size=4 , drop_last=False )
        new_batch_sampler = SkipBatchSampler(batch_sampler , 2 )
        self.assertListEqual(list(new_batch_sampler ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_data_loader(self ):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def test_skip_first_batches(self ):
        """simple docstring"""
        dataloader = DataLoader(list(range(16 ) ) , batch_size=4 )
        new_dataloader = skip_first_batches(dataloader , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
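    # Note added for clarity: the three tests above pin down the same skipping
    # contract from three angles (sampler, dedicated loader, helper): with
    # batch_size=4 over range(16), skipping the first 2 batches must leave
    # exactly [[8, 9, 10, 11], [12, 13, 14, 15]].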
    def test_end_of_dataloader(self ):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def test_end_of_dataloader_dispatcher(self ):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
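# Plain-Python sketch (an assumption, not accelerate's implementation) of the
# round-robin dealing of batches to shards that the expected lists in the
# BatchSamplerShard tests above encode: shard i takes every num_shards-th batch.
def round_robin_shards(batches, num_shards):
    return [batches[i::num_shards] for i in range(num_shards)]

print(round_robin_shards([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]], 2))
# -> [[[0, 1, 2], [6, 7, 8]], [[3, 4, 5], [9, 10, 11]]]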
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def test_top_k_top_p_filtering(self ):
        """simple docstring"""
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
            ], # cumulative prob of 5 highest values <= 0.6
        ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float('inf' )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float('inf' ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
    """simple docstring"""
    if is_tf_available():
        # `framework_dependent_parameters` is the attribute name assumed for the mixin hook.
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self ):
"""simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            """simple docstring"""
            def __init__( self , model ):
                """simple docstring"""
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [1_02, 1_03]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
                    'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size(self ):
"""simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module ):
            """simple docstring"""
            def __init__( self , model ):
                """simple docstring"""
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='input_ids' ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name='attention_mask' ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [1_02, 1_03]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={'serving_default': dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures['serving_default']
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    'input_ids': tf.constant([dummy_input_ids[input_row]] ),
                    'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )['sequences']
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
            hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=tmp_dir )
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                """simple docstring"""
                def __init__( self ):
                    """simple docstring"""
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , 'spiece.model' ) , 'rb' ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
                def call( self , inputs , *args , **kwargs ):
                    """simple docstring"""
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling(self ):
        """simple docstring"""
        generation_kwargs = {
            'do_sample': True,
            'num_beams': 1,
            'top_p': 0.7,
            'top_k': 10,
            'temperature': 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        sentence = 'Hello, my dog is cute and'
        inputs = tokenizer(sentence , return_tensors='tf' )
        model = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
        eos_token_id = 6_38
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**inputs , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [6_38, 1_98]
        with tf.device(':/CPU:0' ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**inputs , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering(self ):
        """simple docstring"""
        bart_tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
        article = 'Hugging Face is a technology company based in New York and Paris.'
        input_ids = bart_tokenizer(article , return_tensors='tf' ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart(TFBartForConditionalGeneration ):
            """simple docstring"""
            def call( self , input_ids , foo=None , **kwargs ):
                """simple docstring"""
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
        fake_output = bart_model.generate(input_ids , foo='bar' ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder(bart_model.model.encoder.__class__ ):
            """simple docstring"""
            def call( self , input_ids , **kwargs ):
                """simple docstring"""
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo='bar' )
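# Framework-agnostic sketch (an assumption, not the transformers implementation)
# of the top-k/top-p filtering rule checked by the first test in this file: keep
# at most top_k of the highest logits whose cumulative softmax mass stays within
# top_p (but never fewer than min_tokens_to_keep), and mask the rest to -inf.
import numpy as np

def top_k_top_p_filter(logits, top_k, top_p, min_tokens_to_keep=1):
    order = np.argsort(logits)[::-1]            # token indices, best first
    probs = np.exp(logits - logits.max())
    probs = probs / probs.sum()
    nucleus = int(np.searchsorted(np.cumsum(probs[order]), top_p)) + 1
    keep = max(min_tokens_to_keep, min(top_k, nucleus))
    filtered = np.full_like(logits, -np.inf, dtype=float)
    filtered[order[:keep]] = logits[order[:keep]]
    return filtered

print(top_k_top_p_filter(np.array([1.0, 3.0, 2.0, -1.0]), top_k=2, top_p=0.9))
# -> [-inf, 3., 2., -inf]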
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
"""simple docstring"""
def __init__( self , __UpperCamelCase=None , **__UpperCamelCase ):
"""simple docstring"""
super().__init__(features=__UpperCamelCase )
snake_case_ = torch_tensor_kwargs
import torch # noqa import torch at initialization
    def _consolidate( self , column ):
        """simple docstring"""
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ):
        """simple docstring"""
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        """simple docstring"""
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        """simple docstring"""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        """simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        """simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        """simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
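# Minimal illustration (assumes torch is installed) of the `_consolidate` rule
# above: equal-shape, equal-dtype tensors in a column are stacked into a single
# tensor, anything else is returned unchanged.
import torch

def consolidate_sketch(column):
    if isinstance(column, list) and column and all(
        isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
        for x in column
    ):
        return torch.stack(column)
    return column

print(consolidate_sketch([torch.zeros(2), torch.ones(2)]).shape)  # torch.Size([2, 2])
print(consolidate_sketch([torch.zeros(2), torch.ones(3)]))        # left as a plain list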
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
    def get_pipeline_config(self ):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 3_00
        return config
    def prepare_config_and_inputs_for_decoder(self ):
        """simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # The attribute names below are assumed for the obfuscated `__A` flags.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self ):
        """simple docstring"""
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = MraModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='MRA does not output attentions' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
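# --- Example (editor's addition, not part of the original file) ---------------
# Standalone version of the first slow test above, for quick experimentation.
# Requires network access to download the checkpoint.
if __name__ == "__main__":
    model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)  # dummy token ids 0..255
    with torch.no_grad():
        hidden = model(input_ids)[0]
    print(hidden.shape)  # expected: torch.Size([1, 256, 768])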
def is_palindrome(head):
    """Check whether a singly linked list is a palindrome by reversing its second half."""
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the first half off from the second
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare the two parts
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """Check whether a singly linked list is a palindrome using a stack."""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half onto a stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Compare the first half against the popped stack values
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    """Check whether a singly linked list is a palindrome using a dict of value positions."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
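# --- Example (editor's addition, not part of the original file) ---------------
# The three checks above assume a node type with `val` and `next` attributes
# that is never defined in this snippet; the minimal ListNode below is a
# hypothetical stand-in so the functions can be exercised end to end.
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    """Build a singly linked list from a Python list and return its head (or None)."""
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head


# all three implementations should agree on a few simple cases
for values in ([], [1], [1, 2, 1], [1, 2, 2, 1], [1, 2, 3]):
    assert (
        is_palindrome(build_list(values))
        == is_palindrome_stack(build_list(values))
        == is_palindrome_dict(build_list(values))
    )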
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings (i.e. reset the position index per cell). Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
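# --- Example (editor's addition, not part of the original file) ---------------
# For reference, the WTQ branch above is equivalent to building the
# configuration directly. The base config name below is an assumption used
# only for illustration; the script itself reads the config from
# --tapas_config_file.
def build_wtq_model():
    config = TapasConfig.from_pretrained(
        "google/tapas-base",
        num_aggregation_labels=4,
        use_answer_as_supervision=True,
        answer_loss_cutoff=0.664694,
        cell_selection_preference=0.207951,
        huber_loss_delta=0.121194,
        init_cell_selection_weights_to_zero=True,
        select_one_column=True,
        allow_empty_column_selection=False,
        temperature=0.0352513,
    )
    return TapasForQuestionAnswering(config)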
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with unit tiles and
    tiles of length 2, 3 or 4, counting the all-unit row as one way (a
    Project Euler 117-style dynamic programme over the leftmost long tile)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
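# --- Cross-check (editor's addition, not part of the original file) ------------
# The table above counts each row by the position and length of the leftmost
# long tile. A direct recursion over the next tile gives the same counts and is
# an easy way to sanity-check small inputs.
from functools import lru_cache


@lru_cache(maxsize=None)
def brute_force(length: int) -> int:
    if length < 0:
        return 0
    if length == 0:
        return 1  # the empty remainder counts as one way
    # place a unit tile, or a tile of length 2, 3 or 4
    return brute_force(length - 1) + sum(brute_force(length - t) for t in range(2, 5))


assert all(solution(n) == brute_force(n) for n in range(20))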
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=[1, 2, 1] , __UpperCamelCase=[2, 2, 4] , __UpperCamelCase=2 , __UpperCamelCase=2.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=8 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = num_heads
snake_case_ = window_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = use_absolute_embeddings
snake_case_ = patch_norm
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = is_training
snake_case_ = scope
snake_case_ = use_labels
snake_case_ = type_sequence_label_size
snake_case_ = encoder_stride
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
snake_case_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = SwinvaForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = SwinvaForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = self.type_sequence_label_size
snake_case_ = SwinvaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
snake_case_ = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__A = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
snake_case_ = len(self.model_tester.depths )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = config.window_size**2
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ = len(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
snake_case_ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# Swinv2 has a different seq_length
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = reshaped_hidden_states[0].shape
snake_case_ = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = 3
snake_case_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwinvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
__UpperCamelCase )
snake_case_ = self.default_image_processor
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
snake_case_ = image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**__UpperCamelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
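# --- Example (editor's addition, not part of the original file) ---------------
# Standalone version of the integration test above. Note that the library
# spells these classes `Swinv2...`; this dump renders the names as `Swinva...`.
if __name__ == "__main__":
    from transformers import AutoImageProcessor, Swinv2ForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
    print(model.config.id2label[logits.argmax(-1).item()])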
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original ViLT weights to our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
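# --- Example (editor's addition, not part of the original file) ---------------
# Using a converted VQA checkpoint. The directory path is a placeholder for
# whatever --pytorch_dump_folder_path was set to above.
def run_vqa_example(model_dir="/path/to/converted/vilt-vqa"):
    processor = ViltProcessor.from_pretrained(model_dir)
    model = ViltForQuestionAnswering.from_pretrained(model_dir)
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
    )
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    with torch.no_grad():
        logits = model(**encoding).logits
    return model.config.id2label[logits.argmax(-1).item()]  # expected: "2"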
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
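# --- Timing sketch (editor's addition, not part of the original file) ----------
# A rough way to see the effect of the IPEX optimization: time one generation
# after a warm-up run (the warm-up keeps one-time kernel setup out of the
# measurement). The single-run measurement is only indicative.
import time

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    pipe(prompt, **generate_kwargs)  # warm-up
start = time.time()
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    pipe(prompt, **generate_kwargs)
print(f"one image in {time.time() - start:.1f}s")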
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
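# --- Sanity check (editor's addition, not part of the original file) -----------
# Confirm the LM head weight was renamed in one of the converted checkpoints.
def check_converted(folder="./DialoGPT-small"):
    state_dict = torch.load(os.path.join(folder, WEIGHTS_NAME), map_location="cpu")
    assert NEW_KEY in state_dict
    assert OLD_KEY not in state_dict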
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
__A = """unispeech-sat"""
def __init__( self , __UpperCamelCase=32 , __UpperCamelCase=7_68 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=30_72 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-5 , __UpperCamelCase="group" , __UpperCamelCase="gelu" , __UpperCamelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase=False , __UpperCamelCase=1_28 , __UpperCamelCase=16 , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.05 , __UpperCamelCase=10 , __UpperCamelCase=2 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=0 , __UpperCamelCase=3_20 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , __UpperCamelCase=1_00 , __UpperCamelCase=2_56 , __UpperCamelCase=2_56 , __UpperCamelCase=0.1 , __UpperCamelCase="mean" , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=2_56 , __UpperCamelCase=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase=(5, 3, 3, 1, 1) , __UpperCamelCase=(1, 2, 3, 1, 1) , __UpperCamelCase=5_12 , __UpperCamelCase=0 , __UpperCamelCase=1 , __UpperCamelCase=2 , __UpperCamelCase=5_04 , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
snake_case_ = num_clusters
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = feat_quantizer_dropout
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = list(__UpperCamelCase )
snake_case_ = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
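# --- Example (editor's addition, not part of the original file) ---------------
# The property above multiplies the conv strides together: how many raw audio
# samples collapse into one encoder frame. The class above is exposed as
# UniSpeechSatConfig in transformers; this dump renders the name differently.
if __name__ == "__main__":
    from transformers import UniSpeechSatConfig

    config = UniSpeechSatConfig()
    # default strides (5, 2, 2, 2, 2, 2, 2) multiply to 320 samples per frame,
    # i.e. 20 ms of audio at a 16 kHz sampling rate
    print(config.inputs_to_logits_ratio)  # 320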
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=30 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=10 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=0.6 , __UpperCamelCase=None , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = mask_ratio
snake_case_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFViTMAEModel(config=__UpperCamelCase )
snake_case_ = model(__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFViTMAEForPreTraining(__UpperCamelCase )
snake_case_ = model(__UpperCamelCase , training=__UpperCamelCase )
# expected sequence length = num_patches
snake_case_ = (self.image_size // self.patch_size) ** 2
snake_case_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ = 1
snake_case_ = TFViTMAEForPreTraining(__UpperCamelCase )
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(__UpperCamelCase , training=__UpperCamelCase )
snake_case_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__A = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
__A = False
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFViTMAEModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
snake_case_ = model(__UpperCamelCase , noise=__UpperCamelCase )
snake_case_ = copy.deepcopy(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = model(**__UpperCamelCase , noise=__UpperCamelCase )
snake_case_ = outputs_dict[0].numpy()
snake_case_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__UpperCamelCase ):
snake_case_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__UpperCamelCase ):
snake_case_ = v.numpy()
else:
snake_case_ = np.array(__UpperCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
snake_case_ = prepare_numpy_arrays(__UpperCamelCase )
snake_case_ = model(__UpperCamelCase , noise=__UpperCamelCase )
snake_case_ = model(**__UpperCamelCase , noise=__UpperCamelCase )
self.assert_outputs_same(__UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.constant(__UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ = tf_noise
super().check_pt_tf_models(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__UpperCamelCase )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(__UpperCamelCase , __UpperCamelCase ),)
if isinstance(__UpperCamelCase , __UpperCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__UpperCamelCase , '_keras_serializable' , __UpperCamelCase )
}
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.convert_to_tensor(__UpperCamelCase )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ = main_layer_class(__UpperCamelCase )
snake_case_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ = tf.keras.Model(__UpperCamelCase , outputs=main_layer(__UpperCamelCase ) )
snake_case_ = model(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(__UpperCamelCase , 'keras_model.h5' )
model.save(__UpperCamelCase )
snake_case_ = tf.keras.models.load_model(
__UpperCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__UpperCamelCase , tf.keras.Model )
snake_case_ = model(__UpperCamelCase )
self.assert_outputs_same(__UpperCamelCase , __UpperCamelCase )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(inputs, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
snake_case_ = model(__UpperCamelCase , noise=__UpperCamelCase )
snake_case_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__UpperCamelCase )
snake_case_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ = model_class.from_config(model.config )
snake_case_ = new_model(__UpperCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ = new_model(__UpperCamelCase , noise=__UpperCamelCase )
self.assert_outputs_same(__UpperCamelCase , __UpperCamelCase )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
snake_case_ = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=__UpperCamelCase , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ = ViTMAEConfig()
snake_case_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ = model(**__UpperCamelCase , noise=__UpperCamelCase )
# verify the logits
snake_case_ = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
snake_case_ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
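# --- Example (editor's addition, not part of the original file) ---------------
# Standalone version of the integration test above; passing the noise
# explicitly makes the randomly sampled patch mask reproducible.
if __name__ == "__main__":
    np.random.seed(2)
    processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
    model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
    inputs = processor(images=prepare_img(), return_tensors="tf")
    num_patches = (model.config.image_size // model.config.patch_size) ** 2
    noise = np.random.uniform(size=(1, num_patches))
    outputs = model(**inputs, noise=noise)
    print(outputs.logits.shape)  # (1, 196, 768): per-patch pixel predictions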
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = name
snake_case_ = val
def __str__( self ):
"""simple docstring"""
return f"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self , __UpperCamelCase ):
"""simple docstring"""
return self.val < other.val
class MinHeap:
    """Min-heap keyed on Node.val, with an index map so decrease_key runs in O(log n)."""

    def __init__(self, array):
        # maps a Node to its current index inside self.heap
        self.idx_of_element = {}
        # maps a node's name to its current value
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, starting from the last parent
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]] = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        # move the last element to the root, pop the old root, then restore the heap
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
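# A quick sanity check (illustrative): after decrease_key(b, -17), node B holds the
# smallest value and must therefore sit at the root of the heap.
assert my_min_heap.peek() is b and b.val == -17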
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(
        self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3],
        layer_type='preactivation', hidden_act='relu', global_padding=None, num_groups=32, drop_path_rate=0.0,
        embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
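# How this works (a rough sketch of the idea, not the real `_LazyModule` code):
# the placeholder module defers heavy imports until an attribute is first touched,
# roughly:
#
#     class _LazyModuleSketch(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = self._class_to_module[name]  # e.g. 'modeling_perceiver'
#             module = importlib.import_module('.' + submodule, self.__name__)
#             return getattr(module, name)
#
# so importing this package stays cheap even when torch or vision extras are absent.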
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = True
# test_resize_embeddings = False
__A = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.' )
def test_inference_no_head(self):
directory = 'nvidia/megatron-bert-uncased-345m'
if 'MYDIR' in os.environ:
    directory = os.path.join(os.environ['MYDIR'], directory)
model = MegatronBertModel.from_pretrained(directory)
model.to(torch_device)
model.half()
input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
    output = model(input_ids)[0]
expected_shape = torch.Size((1, 9, 1024))
self.assertEqual(output.shape, expected_shape)
expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3):
    for jj in range(3):
        a = output[0, ii, jj]
        b = expected[3 * ii + jj]
        msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
        self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
def fizz_buzz(number, iterations):
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)
# print(out)
number += 1
out += " "
return out
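# A quick check (illustrative): the classic first fifteen rounds, trailing space included.
assert fizz_buzz(1, 15) == '1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz '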
if __name__ == "__main__":
import doctest
doctest.testmod()
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data, key, left + 1, right - 1)
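# Usage sketch (illustrative): each call probes both ends and recurses inward,
# so 4 is found at index 2 on the second call.
assert search([1, 2, 4, 8], 4) == 2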
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", 'w') as f1, open(f"{class_data_dir}/urls.txt", 'w') as f2, open(
        f"{class_data_dir}/images.txt", 'w') as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    # open the downloaded bytes once just to validate the image
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_table_transformer'] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'BertModelTest': 'BertModelTester'}
        expected_blip_mapping = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        expected_blip_mapping = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        # Flatten: (batch_size, num_choices, ...) -> (batch_size * num_choices, ...)
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
        # Un-flatten back to (batch_size, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
    # If we pass only one argument to the script and it's the path to a json file,
    # let's parse it to get our arguments.
    model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
    f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
    + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
    last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
    data_files = {}
    if data_args.train_file is not None:
        data_files['train'] = data_args.train_file
    if data_args.validation_file is not None:
        data_files['validation'] = data_args.validation_file
    extension = data_args.train_file.split('.')[-1]
    raw_datasets = load_dataset(
        extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
else:
# Downloading and loading the swag dataset from the hub.
    raw_datasets = load_dataset(
        'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
tokenizer = AutoTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
model = AutoModelForMultipleChoice.from_pretrained(
    model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f'ending{i}' for i in range(4)]
context_name = 'sent1'
question_header_name = 'sent2'
if data_args.max_seq_length is None:
    max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
        max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
    first_sentences = [[context] * 4 for context in examples[context_name]]
    question_headers = examples[question_header_name]
    second_sentences = [
        [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers)
    ]
    # Flatten out
    first_sentences = list(chain(*first_sentences))
    second_sentences = list(chain(*second_sentences))
    # Tokenize
    tokenized_examples = tokenizer(
        first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding='max_length' if data_args.pad_to_max_length else False)
    # Un-flatten
    return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
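    # Shape walk-through (illustrative): with 2 examples x 4 endings, `first_sentences`
    # flattens to 8 sequences; the dict comprehension above regroups every tokenizer
    # output list as [v[0:4], v[4:8]], i.e. one row of 4 encoded choices per example.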
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
    train_dataset = raw_datasets['train']
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    with training_args.main_process_first(desc='train dataset map pre-processing'):
        train_dataset = train_dataset.map(
            preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
    eval_dataset = raw_datasets['validation']
    if data_args.max_eval_samples is not None:
        max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
        eval_dataset = eval_dataset.select(range(max_eval_samples))
    with training_args.main_process_first(desc='validation dataset map pre-processing'):
        eval_dataset = eval_dataset.map(
            preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache)
# Data collator
data_collator = (
    default_data_collator
    if data_args.pad_to_max_length
    else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
    predictions, label_ids = eval_predictions
    preds = np.argmax(predictions, axis=1)
    return {'accuracy': (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    trainer.save_model()  # Saves the tokenizer too for easy upload
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics['train_samples'] = min(max_train_samples, len(train_dataset))
    trainer.log_metrics('train', metrics)
    trainer.save_metrics('train', metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
    metrics = trainer.evaluate()
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
    trainer.log_metrics('eval', metrics)
    trainer.save_metrics('eval', metrics)
kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan, used once the search window is below `precision`."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
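# Worked example (illustrative): with precision = 10, a six-element window falls
# straight through to lin_search in both variants.
example = [0, 1, 2, 3, 4, 5]
assert ite_ternary_search(example, 3) == 3
assert rec_ternary_search(0, len(example) - 1, example, 3) == 3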
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print('Not found')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])
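# Worked example (illustrative): "2 3 + 4 *" evaluates (2 + 3) * 4 and prints its
# trace table while doing so.
assert solve(['2', '3', '+', '4', '*']) == 20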
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
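# Example invocation (illustrative; script name and paths are placeholders):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch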
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
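        # Worked example (illustrative): seq_length=7 and attention_window=4 give
        # encoder_seq_length = 7 + (4 - 7 % 4) % 4 = 7 + 1 = 8, i.e. inputs are padded
        # up to the next multiple of the attention window.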
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates)
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1)
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select a random slice of the hidden dimension
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for the slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
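# The padding-mask idiom used above, in isolation (a standalone sketch with
# made-up token ids and pad_token_id=1, not values taken from the tests):
#
#   ids = tf.constant([[5, 7, 1, 1]])
#   tf.cast(tf.math.not_equal(ids, 1), tf.int8)  # -> [[1, 1, 0, 0]]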
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # head masking is not exercised for LED here
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
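# For instance (shape/dtype sketch): _long_tensor([[0, 31414, 232]]) returns a
# tf.int32 tensor of shape (1, 3), matching what the integration tests below feed in.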
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 46
|
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
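# To run just the fast test above with pytest (a standard invocation; the file
# name is whatever this module is saved as):
#
#   pytest -q test_latent_diffusion_uncond.py -k test_inference_uncond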
| 46
| 1
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """
    Return the maximum profit that fits in a knapsack of capacity `max_weight`,
    using the greedy fractional-knapsack strategy: repeatedly take the item with
    the highest profit/weight ratio, splitting the last item if it does not fit
    entirely.
    """
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store the profit gained per 1 kg for each item
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Create a copy of the list and sort profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and while i < length
    while limit <= max_weight and i < length:
        # the greatest not-yet-taken element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        # mark the item as taken so .index() never finds it again
        profit_by_weight[index] = -1

        # check if the whole item still fits in the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # add the full profit for this item: weight[index]/weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item no longer fits entirely, so take only the fraction that
            # fills the remaining capacity: (remaining weight / weight[index]).
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
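# A quick worked example for the greedy routine above (illustrative values, not
# part of the original script): profits [10, 9, 8], weights [2, 3, 4], capacity 5.
# The ratios are [5.0, 3.0, 2.0]; the loop takes all of item 0 (gain 10, 2 kg
# used) and all of item 1 (gain 19, 5 kg used); the third item no longer fits, so:
#
#   assert calc_profit([10, 9, 8], [2, 3, 4], 5) == 19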
| 46
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
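# Example usage sketch (the method and kwarg names here are illustrative, not
# from this file): inside a pipeline method, pop a deprecated kwarg and warn
# the caller once.
#
#   def __call__(self, image=None, **kwargs):
#       init_image = deprecate("init_image", "1.0.0", "Use `image` instead.", take_from=kwargs)
#       image = init_image or image
#
# Any kwarg left in `kwargs` after all deprecate() calls raises the TypeError
# implemented above, mimicking Python's normal unexpected-keyword error.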
| 46
|
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters,
    arranged differently (ignoring case and spaces).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each position, increment the count for the character in the first
    # string and decrement it for the character in the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 46
| 1
|
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init-weights function to keep the class compatible with the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
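# A minimal usage sketch (the timm model name "resnet18" is an assumption; any
# name accepted by timm.list_models() works):
#
#   import torch
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   [fm.shape for fm in outputs.feature_maps]  # one feature map per configured out_index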
| 46
|
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 1
|