| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87-55.2k) | int64 (0-349) | string (lengths 135-49.1k) | int64 (0-349) | int64 (0-1) |
"""ResNet model configuration"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
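# A minimal usage sketch (not part of the original file): the defaults mirror the
# ResNet-50 architecture referenced in the archive map above.
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']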
# ----- dataset row boundary: 200 -----
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
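# Hedged usage note (not part of the original script; the filename below is an
# assumption about how this converter is saved on disk):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub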
# ----- dataset row boundary: 312 | 0 -----
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class snake_case ( snake_case_ ):
'''simple docstring'''
A_ : List[Any] = 'deit'
def __init__( self : Tuple, _lowerCamelCase : Dict=7_68, _lowerCamelCase : int=12, _lowerCamelCase : Optional[int]=12, _lowerCamelCase : int=30_72, _lowerCamelCase : Dict="gelu", _lowerCamelCase : Optional[Any]=0.0, _lowerCamelCase : Any=0.0, _lowerCamelCase : Optional[int]=0.02, _lowerCamelCase : Union[str, Any]=1e-12, _lowerCamelCase : Union[str, Any]=2_24, _lowerCamelCase : Dict=16, _lowerCamelCase : Optional[int]=3, _lowerCamelCase : List[Any]=True, _lowerCamelCase : Tuple=16, **_lowerCamelCase : List[Any], ):
'''simple docstring'''
super().__init__(**_lowerCamelCase )
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = initializer_range
__A = layer_norm_eps
__A = image_size
__A = patch_size
__A = num_channels
__A = qkv_bias
__A = encoder_stride
class snake_case ( snake_case_ ):
'''simple docstring'''
A_ : Union[str, Any] = version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
return 1e-4
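# A minimal usage sketch (not part of the original file): `DeiTOnnxConfig` wraps a
# model config, and `inputs`/`atol_for_validation` drive ONNX export validation.
#
#   config = DeiTConfig()
#   onnx_config = DeiTOnnxConfig(config)
#   print(onnx_config.inputs)               # pixel_values with dynamic axes
#   print(onnx_config.atol_for_validation)  # 1e-4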
# ----- dataset row boundary: 266 -----
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current session."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"


def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
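# Illustrative behavior of `_add_variant` (example values, not from the original file):
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")  -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin", None)    -> "diffusion_pytorch_model.bin"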
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
# ----- dataset row boundary: 312 | 0 -----
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __lowerCAmelCase ( snake_case_ , snake_case_ ):
lowercase = 'pixel_values'
lowercase = False
lowercase = TimmBackboneConfig
def __init__( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(self , 'timm' )
super().__init__(__UpperCAmelCase )
__UpperCamelCase = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.' )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(__UpperCAmelCase , 'out_features' ) and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' )
__UpperCamelCase = getattr(__UpperCAmelCase , 'use_pretrained_backbone' , __UpperCAmelCase )
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' )
# We just take the final layer by default. This matches the default for the transformers models.
__UpperCamelCase = config.out_indices if getattr(__UpperCAmelCase , 'out_indices' , __UpperCAmelCase ) is not None else (-1,)
__UpperCamelCase = timm.create_model(
config.backbone , pretrained=__UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__UpperCAmelCase , **__UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__UpperCamelCase = self._backbone.return_layers
__UpperCamelCase = {layer['module']: str(__UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(__UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls , ['vision', 'timm'] )
from ...models.timm_backbone import TimmBackboneConfig
__UpperCamelCase = kwargs.pop('config' , TimmBackboneConfig() )
__UpperCamelCase = kwargs.pop('use_timm_backbone' , __UpperCAmelCase )
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones' )
__UpperCamelCase = kwargs.pop('num_channels' , config.num_channels )
__UpperCamelCase = kwargs.pop('features_only' , config.features_only )
__UpperCamelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone )
__UpperCamelCase = kwargs.pop('out_indices' , config.out_indices )
__UpperCamelCase = TimmBackboneConfig(
backbone=__UpperCAmelCase , num_channels=__UpperCAmelCase , features_only=__UpperCAmelCase , use_pretrained_backbone=__UpperCAmelCase , out_indices=__UpperCAmelCase , )
return super()._from_config(__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment' )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__UpperCamelCase = self._all_layers
__UpperCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = self._return_layers
__UpperCamelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
__UpperCamelCase = self._backbone(__UpperCAmelCase , **__UpperCAmelCase )
__UpperCamelCase = None
__UpperCamelCase = tuple(__UpperCAmelCase )
__UpperCamelCase = tuple(__UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
__UpperCamelCase = (feature_maps,)
if output_hidden_states:
__UpperCamelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=__UpperCAmelCase , hidden_states=__UpperCAmelCase , attentions=__UpperCAmelCase )
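# A hedged usage sketch (assumes torch and timm are installed; "resnet18" is an
# illustrative timm model name, and random weights are used to avoid a download):
#
#   import torch
#   backbone = TimmBackbone.from_pretrained("resnet18", use_pretrained_backbone=False)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   print([fm.shape for fm in outputs.feature_maps])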
# ----- dataset row boundary: 316 -----
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----- dataset row boundary: 312 | 0 -----
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
# ----- dataset row boundary: 322 -----
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of one or more `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
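# Illustrative calls (example inputs chosen here, not from the original file):
#
#   word_break("applepenapple", ["apple", "pen"])                   -> True
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])  -> False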
# ----- dataset row boundary: 312 | 0 -----
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----- dataset row boundary: 119 -----
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}


class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
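# A hedged usage sketch (assumes Hub access; the checkpoint name comes from the map above):
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tokenizer("hello world")["input_ids"]  # [CLS] ... [SEP], token type ids built by the helpers above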
# ----- dataset row boundary: 312 | 0 -----
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication using a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]

        # Remove leading 0's
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c

    # Overwrite __str__ for print(); shows A, B, and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
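# Illustrative multiplication (example polynomials, not from the original file):
# (1 + 2x + 3x^2) * (1 + x) = 1 + 3x + 5x^2 + 3x^3
#
#   fft = FFT([1, 2, 3], [1, 1])
#   print(fft.product)  # [(1+0j), (3+0j), (5+0j), (3+0j)]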
# ----- dataset row boundary: 26 -----
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable formatter is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that returns a Formatter instance given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
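# Illustrative use of the registry (the numpy formatter is always registered above):
#
#   formatter = get_formatter("np")          # alias "np" resolves to "numpy"
#   assert isinstance(formatter, NumpyFormatter)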
# ----- dataset row boundary: 312 | 0 -----
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve for shear stress, tangential force, or area: exactly one of the
    three arguments must be zero, and it is computed from the other two."""
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
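# Illustrative call (example values): pass zero for the unknown quantity.
#
#   shear_stress(stress=25, tangential_force=100, area=0)
#   -> ('area', 4.0)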
# ----- dataset row boundary: 87 -----
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# ----- dataset row boundary: 312 | 0 -----
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
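# Illustrative call (example values): split 16647 bytes into 4 partitions; the
# last partition absorbs the remainder.
#
#   allocation_num(16647, 4)
#   -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']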
# ----- dataset row boundary: 339 -----
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: assign through __dict__ to bypass immutability
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
# ----- dataset row boundary: 312 | 0 -----
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ : Any = logging.get_logger(__name__)
A__ : List[Any] = {'vocab_file': 'spiece.model'}
A__ : Optional[int] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
A__ : Optional[int] = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class lowercase__ ( snake_case_ ):
_UpperCAmelCase :Optional[int] = VOCAB_FILES_NAMES
_UpperCAmelCase :Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Dict = ['input_ids', 'attention_mask']
_UpperCAmelCase :List[int] = []
def __init__( self : Any , snake_case__ : Tuple , snake_case__ : Optional[Any]="<unk>" , snake_case__ : int="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="<pad>" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : int="[CLS]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Dict , ):
lowerCamelCase_ : List[Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
lowerCamelCase_ : Optional[int] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowerCamelCase_ : Optional[int] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
lowerCamelCase_ : Union[str, Any] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
lowerCamelCase_ : Any =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
lowerCamelCase_ : Tuple =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : List[str] =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
lowerCamelCase_ : Dict ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sep_token=snake_case__ , mask_token=snake_case__ , cls_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCamelCase_ : Any =vocab_file
lowerCamelCase_ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return self.sp_model.get_piece_size()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : int ={self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
lowerCamelCase_ : Optional[int] =self.__dict__.copy()
lowerCamelCase_ : str =None
return state
def __setstate__( self : Tuple , snake_case__ : List[str] ):
lowerCamelCase_ : str =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ : List[str] ={}
lowerCamelCase_ : Any =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Tuple , snake_case__ : str ):
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCAmelCase__ ( self : int , snake_case__ : Optional[Any] ):
return self.sp_model.piece_to_id(snake_case__ )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : int ):
lowerCamelCase_ : List[Any] =self.sp_model.IdToPiece(snake_case__ )
return token
def UpperCAmelCase__ ( self : Tuple , snake_case__ : Optional[int] ):
lowerCamelCase_ : List[str] =[]
lowerCamelCase_ : List[str] =""
lowerCamelCase_ : str =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
lowerCamelCase_ : Union[str, Any] =True
lowerCamelCase_ : Union[str, Any] =[]
else:
current_sub_tokens.append(snake_case__ )
lowerCamelCase_ : int =False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def UpperCAmelCase__ ( self : Tuple , snake_case__ : List[int] , snake_case__ : bool = False , snake_case__ : bool = None , snake_case__ : bool = True , **snake_case__ : Optional[int] , ):
lowerCamelCase_ : int =kwargs.pop("use_source_tokenizer" , snake_case__ )
lowerCamelCase_ : int =self.convert_ids_to_tokens(snake_case__ , skip_special_tokens=snake_case__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ : List[str] =[]
lowerCamelCase_ : int =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
lowerCamelCase_ : Optional[Any] =[]
sub_texts.append(snake_case__ )
else:
current_sub_text.append(snake_case__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCamelCase_ : int =re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(snake_case__ ) )
else:
lowerCamelCase_ : List[Any] ="".join(snake_case__ )
lowerCamelCase_ : List[str] =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ : Union[str, Any] =self.clean_up_tokenization(snake_case__ )
return clean_text
else:
return text
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : Optional[int] =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
lowerCamelCase_ : Tuple =self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ : Dict =[self.cls_token_id]
lowerCamelCase_ : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCAmelCase__ ( self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
lowerCamelCase_ : Union[str, Any] =[self.sep_token_id]
lowerCamelCase_ : Tuple =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
# ----- dataset row boundary: 144 -----
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
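# A quick round-trip check for the two helpers above.
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"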
| 312
| 0
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase : Optional[int] = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
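# For context, a toy illustration of the deferred-import pattern used above
# (an assumption about the general technique, not the actual transformers
# implementation): nothing is imported until an exported symbol is touched.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        # first access triggers the real import, then the result is cached
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value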
| 280
|
import cv2
import numpy as np


class HarrisCorner:
    """simple docstring"""

    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the constant chosen at construction time
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    # mark the corner pixel (replaces the deprecated ndarray.itemset calls)
                    color_img[y, x] = (0, 0, 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
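# The corner response computed above is R = det(M) - k * trace(M)^2 for the
# 2x2 structure tensor M = [[wxx, wxy], [wxy, wyy]]; a tiny standalone check:
import numpy as np

wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
m = np.array([[wxx, wxy], [wxy, wyy]])
r = np.linalg.det(m) - k * np.trace(m) ** 2
assert abs(r - ((wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2)) < 1e-6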
| 312
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (snake_case_ ):
"""simple docstring"""
__a =['input_features']
def __init__( self : List[Any] , __a : Optional[Any]=80 , __a : List[Any]=1_60_00 , __a : List[str]=1_60 , __a : str=30 , __a : str=4_00 , __a : Optional[int]=0.0 , __a : Union[str, Any]=False , **__a : List[Any] , ):
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
_a = n_fft
_a = hop_length
_a = chunk_length
_a = chunk_length * sampling_rate
_a = self.n_samples // hop_length
_a = sampling_rate
_a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , )
def UpperCamelCase__ ( self : str , __a : np.array ):
_a = spectrogram(
__a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_a = log_spec[:, :-1]
_a = np.maximum(__a , log_spec.max() - 8.0 )
_a = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCamelCase__ ( __a : List[np.ndarray] , __a : List[np.ndarray] , __a : float = 0.0 ):
if attention_mask is not None:
_a = np.array(__a , np.intaa )
_a = []
for vector, length in zip(__a , attention_mask.sum(-1 ) ):
_a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_a = padding_value
normed_input_values.append(__a )
else:
_a = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : bool = True , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = None , __a : Optional[str] = "max_length" , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[bool] = None , **__a : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_a = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_a = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
_a = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [np.asarray([raw_speech] ).T]
_a = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_a = self.pad(
__a , padding=__a , max_length=max_length if max_length else self.n_samples , truncation=__a , pad_to_multiple_of=__a , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_a = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_a = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_a = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_a = [self._np_extract_fbank_features(__a ) for waveform in input_features[0]]
if isinstance(input_features[0] , __a ):
_a = [np.asarray(__a , dtype=np.floataa ) for feature in input_features]
else:
_a = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_a = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_a = padded_inputs.convert_to_tensors(__a )
return padded_inputs
def UpperCamelCase__ ( self : Optional[Any] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
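# The clamp-and-rescale step from `_np_extract_fbank_features` above, shown
# standalone: log-mel values are clipped to within 8.0 of the per-example
# maximum (in log10 units), then affinely mapped towards [-1, 1].
import numpy as np

log_spec = np.log10(np.maximum(np.random.rand(80, 300), 1e-10))
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
assert log_spec.max() <= 1.0 + 1e-6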
| 63
|
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 312
| 0
|
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """simple docstring"""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")


def main() -> None:
    """simple docstring"""
    message = input("""Encrypted message: """)
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
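# "KHOOR" is "HELLO" shifted forward by 3; undoing that shift by hand matches
# the Key #3 candidate line that decrypt("KHOOR") prints.
shifted_back = "".join(chr((ord(c) - ord("A") - 3) % 26 + ord("A")) for c in "KHOOR")
assert shifted_back == "HELLO"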
| 200
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['image_processor', 'tokenizer']
_lowerCamelCase : Tuple = 'OwlViTImageProcessor'
_lowerCamelCase : List[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[Any] , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Any ):
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
A_ = kwargs.pop("feature_extractor" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self : Optional[int] , UpperCAmelCase : List[str]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="max_length" , UpperCAmelCase : Optional[Any]="np" , **UpperCAmelCase : Optional[int] ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(UpperCAmelCase , UpperCAmelCase ) or (isinstance(UpperCAmelCase , UpperCAmelCase ) and not isinstance(text[0] , UpperCAmelCase )):
A_ = [self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )]
elif isinstance(UpperCAmelCase , UpperCAmelCase ) and isinstance(text[0] , UpperCAmelCase ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(t) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(t) != max_num_queries:
t = t + [" "] * (max_num_queries - len(t))
A_ = self.tokenizer(t , padding=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
encodings.append(UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
A_ = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def __A ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
return self.image_processor.post_process(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , *UpperCAmelCase : str , **UpperCAmelCase : Union[str, Any] ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : int , **UpperCAmelCase : int ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Any ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def __A ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
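# The per-batch query padding implemented in `__call__` above, standalone:
# every sample's query list is right-padded with " " to the longest list.
text = [["a photo of a cat", "a photo of a dog"], ["a photo of a remote"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
assert padded[1] == ["a photo of a remote", " "]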
| 312
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    """simple docstring"""
    with parallel_backend('''spark'''):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend('''unsupported backend'''):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''', [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """simple docstring"""
    s1 = [1, 2]
    s2 = {'''a''': 1, '''b''': 2}
    s3 = {'''a''': [1, 2], '''b''': [3, 4]}
    s4 = {'''a''': {'''1''': 1}, '''b''': 2}
    s5 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'''a''': 2, '''b''': 3}
    expected_map_nested_s3 = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_s4 = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_s5 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend('''spark'''):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
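# `map_nested` maps a function through nested lists/dicts; a minimal pure-Python
# stand-in (an illustrative sketch, not the datasets implementation) showing the
# contract the assertions above rely on:
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_sketch(fn, v) for v in data)
    return fn(data)


assert map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": [3, 4]}) == {"a": [2, 3], "b": [4, 5]}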
| 266
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , UpperCAmelCase : int = 768 , ):
super().__init__()
A_ = nn.Parameter(torch.zeros(1 , UpperCAmelCase ) )
A_ = nn.Parameter(torch.ones(1 , UpperCAmelCase ) )
def __A ( self : str , UpperCAmelCase : Optional[Union[str, torch.device]] = None , UpperCAmelCase : Optional[torch.dtype] = None , ):
A_ = nn.Parameter(self.mean.to(UpperCAmelCase ).to(UpperCAmelCase ) )
A_ = nn.Parameter(self.std.to(UpperCAmelCase ).to(UpperCAmelCase ) )
return self
def __A ( self : Dict , UpperCAmelCase : List[Any] ):
A_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def __A ( self : int , UpperCAmelCase : int ):
A_ = (embeds * self.std) + self.mean
return embeds
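# The scale/unscale pair above is an affine normalisation, so round-tripping
# is the identity up to float error. A standalone check with plain tensors:
import torch

mean, std = torch.zeros(1, 4), torch.full((1, 4), 2.0)
embeds = torch.randn(3, 4)
scaled = (embeds - mean) * 1.0 / std
assert torch.allclose((scaled * std) + mean, embeds, atol=1e-6)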
| 312
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCAmelCase ( snake_case_ ):
lowercase = 'ClapFeatureExtractor'
lowercase = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.pop('sampling_rate' , __UpperCAmelCase )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
__UpperCamelCase = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if audios is not None:
__UpperCamelCase = self.feature_extractor(
__UpperCAmelCase , sampling_rate=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and audios is not None:
__UpperCamelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
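# The `model_input_names` property above merges the two components' input names
# while keeping order and dropping duplicates; `dict.fromkeys` is the idiom doing it:
tokenizer_names = ["input_ids", "attention_mask"]
feature_extractor_names = ["input_features", "attention_mask"]
merged = list(dict.fromkeys(tokenizer_names + feature_extractor_names))
assert merged == ["input_ids", "attention_mask", "input_features"]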
| 316
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __snake_case ( __UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : NDArray[floataa] ,__UpperCamelCase : list[int] ,__UpperCamelCase : int ,):
"""simple docstring"""
A_ , A_ = coefficient_matrix.shape
A_ , A_ = constant_matrix.shape
if rowsa != colsa:
A_ = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(__UpperCamelCase )
if colsa != 1:
A_ = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(__UpperCamelCase )
if rowsa != rowsa:
A_ = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(__UpperCamelCase )
if len(__UpperCamelCase ) != rowsa:
A_ = (
"Number of initial values must be equal to number of rows in coefficient "
f'''matrix but received {len(__UpperCamelCase )} and {rowsa}'''
)
raise ValueError(__UpperCamelCase )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
A_ = np.concatenate(
(coefficient_matrix, constant_matrix) ,axis=1 )
A_ , A_ = table.shape
strictly_diagonally_dominant(__UpperCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(__UpperCamelCase ):
A_ = []
for row in range(__UpperCamelCase ):
A_ = 0
for col in range(__UpperCamelCase ):
if col == row:
A_ = table[row][col]
elif col == cols - 1:
A_ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ = (temp + val) / denom
new_val.append(__UpperCamelCase )
A_ = new_val
return [float(__UpperCamelCase ) for i in new_val]
def __snake_case ( __UpperCamelCase : NDArray[floataa] ):
"""simple docstring"""
A_ , A_ = table.shape
A_ = True
for i in range(0 ,__UpperCamelCase ):
A_ = 0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
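# One hand-worked Jacobi sweep for a strictly diagonally dominant 2x2 system,
# matching the update rule in the loops above: x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii.
import numpy as np

a = np.array([[4.0, 1.0], [2.0, 5.0]])
b = np.array([9.0, 12.0])
x = np.zeros(2)
x_new = (b - (a - np.diag(np.diag(a))) @ x) / np.diag(a)
assert np.allclose(x_new, [9 / 4, 12 / 5])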
| 312
| 0
|
def solution(length: int = 50) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
| 322
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    """simple docstring"""

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 312
| 0
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( snake_case_ ):
def __init__( self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
super().__init__()
UpperCamelCase : Union[str, Any] = nn.ModuleList(SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = True, ) -> Any:
for i, (image, scale, controlnet) in enumerate(zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.nets ) ):
UpperCamelCase , UpperCamelCase : str = controlnet(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, )
# merge samples
if i == 0:
UpperCamelCase , UpperCamelCase : str = down_samples, mid_sample
else:
UpperCamelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, ) -> Optional[int]:
UpperCamelCase : Any = 0
UpperCamelCase : str = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
SCREAMING_SNAKE_CASE_, is_main_process=SCREAMING_SNAKE_CASE_, save_function=SCREAMING_SNAKE_CASE_, safe_serialization=SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_, )
idx += 1
UpperCamelCase : Union[str, Any] = model_path_to_save + F"""_{idx}"""
@classmethod
def snake_case_ ( cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCamelCase : Tuple = 0
UpperCamelCase : Tuple = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCamelCase : str = pretrained_model_path
while os.path.isdir(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = ControlNetModel.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
controlnets.append(SCREAMING_SNAKE_CASE_ )
idx += 1
UpperCamelCase : str = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(SCREAMING_SNAKE_CASE_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(SCREAMING_SNAKE_CASE_ )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(SCREAMING_SNAKE_CASE_ )
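# The merge step in the forward pass above is an elementwise sum of each
# ControlNet's residuals; a toy version of one such merge:
import torch

prev = [torch.ones(2, 2)]
curr = [torch.full((2, 2), 3.0)]
merged = [p + c for p, c in zip(prev, curr)]
assert torch.equal(merged[0], torch.full((2, 2), 4.0))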
| 119
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
__a :Any = TypeVar('T')
__a :Union[str, Any] = Union[List[T], Tuple[T, ...]]
__a :List[str] = Union[T, List[T], Dict[str, T]]
__a :Any = Union[str, bytes, os.PathLike]
| 312
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26
|
__a :Dict = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 312
| 0
|
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
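# Kadane's recurrence in action: the best contiguous slice of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1] with sum 6.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6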
| 87
|
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
| 312
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
_UpperCAmelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
_UpperCAmelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
_UpperCAmelCase = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
_UpperCAmelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(__UpperCamelCase ):
_UpperCAmelCase = requests.get(url + F"&page={i + 2}" , headers=__UpperCamelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def A ( _UpperCAmelCase : int , _UpperCAmelCase : List[Any]=None ) -> int:
'''simple docstring'''
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
_UpperCAmelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
_UpperCAmelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
_UpperCAmelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(__UpperCamelCase ):
_UpperCAmelCase = requests.get(url + F"&page={i + 2}" , headers=__UpperCamelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
_UpperCAmelCase = requests.get(__UpperCamelCase , headers=__UpperCamelCase , allow_redirects=__UpperCamelCase )
_UpperCAmelCase = result.headers['Location']
_UpperCAmelCase = requests.get(__UpperCamelCase , allow_redirects=__UpperCamelCase )
_UpperCAmelCase = os.path.join(__UpperCamelCase , F"{artifact_name}.zip" )
with open(__UpperCamelCase , 'wb' ) as fp:
fp.write(response.content )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str]=None ) -> str:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(__UpperCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(__UpperCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__UpperCamelCase ) as f:
for line in f:
_UpperCAmelCase = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(': ' )]
_UpperCAmelCase = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
_UpperCAmelCase = line[len('FAILED ' ) :]
failed_tests.append(__UpperCamelCase )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(__UpperCamelCase )} for `errors` "
F"and {len(__UpperCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
' problem.' )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(__UpperCamelCase , __UpperCamelCase )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(__UpperCamelCase , __UpperCamelCase )]
return result
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any]=None ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(__UpperCamelCase , __UpperCamelCase ) for p in os.listdir(__UpperCamelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(__UpperCamelCase , job_links=__UpperCamelCase ) )
return errors
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : int=None ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def A ( _UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = test.split('::' )[0]
if test.startswith('tests/models/' ):
_UpperCAmelCase = test.split('/' )[2]
else:
_UpperCAmelCase = None
return test
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=None ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {'count': n_errors, 'errors': error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = '| no. | error | status |'
_UpperCAmelCase = '|-:|:-|:-|'
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]['count']
_UpperCAmelCase = F"| {count} | {error[:100]} | |"
lines.append(__UpperCamelCase )
return "\n".join(__UpperCamelCase )
def A ( _UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = '| model | no. of errors | major error | count |'
_UpperCAmelCase = '|-:|-:|-:|-:|'
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]['count']
_UpperCAmelCase , _UpperCAmelCase = list(reduced_by_model[model]['errors'].items() )[0]
_UpperCAmelCase = F"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(__UpperCamelCase )
return "\n".join(__UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
UpperCAmelCase__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCAmelCase__ = get_job_links(args.workflow_run_id, token=args.token)
UpperCAmelCase__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCAmelCase__ = k.find(" / ")
UpperCAmelCase__ = k[index + len(" / ") :]
UpperCAmelCase__ = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCAmelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCAmelCase__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCAmelCase__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCAmelCase__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCAmelCase__ = reduce_by_error(errors)
UpperCAmelCase__ = reduce_by_model(errors)
UpperCAmelCase__ = make_github_table(reduced_by_error)
UpperCAmelCase__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
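# Shape of the aggregation performed above, with toy logs of the form
# (error_line, error, failed_test):
from collections import Counter

toy_logs = [("l1", "OOM", "test_a"), ("l2", "OOM", "test_b"), ("l3", "Timeout", "test_c")]
toy_counter = Counter(x[1] for x in toy_logs)
assert toy_counter.most_common(1) == [("OOM", 2)]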
| 339
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward()
| 312
| 0
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a__ : int = logging.get_logger(__name__)
def UpperCAmelCase_( a__=None , a__=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=a__ )
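# Why the factory above matters: `dataclasses.field` rejects mutable defaults,
# so each instance must build its own fresh list. A standalone check:
import dataclasses


@dataclasses.dataclass
class _Demo:
    xs: list = dataclasses.field(default_factory=lambda: [8])


assert _Demo().xs == [8] and _Demo().xs is not _Demo().xs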
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
__SCREAMING_SNAKE_CASE : List[int] = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
__SCREAMING_SNAKE_CASE : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Use FP16 to accelerate inference.'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Benchmark training of model'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Verbose memory tracing'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Trace memory line by line'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Save result to a CSV file'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Save all print statements in a log file'} )
__SCREAMING_SNAKE_CASE : bool = field(default=a__ , metadata={'help': 'Whether to print environment information'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"inference_time_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"inference_memory_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"train_time_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"train_memory_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"env_info_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving environment information.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default=f"log_{round(time() )}.csv" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
__SCREAMING_SNAKE_CASE : int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def __lowerCAmelCase ( self ) ->Optional[int]:
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _lowerCamelCase , )
def __lowerCAmelCase ( self ) ->List[str]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __lowerCAmelCase ( self ) ->List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 313
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
| 313
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
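# Usage note (added; the path below is assumed from the transformers test layout):
# this suite runs standalone with `python -m pytest tests/models/mgp_str -q`, and the
# @require_torch / @require_vision decorators skip it when those extras are missing.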
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
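# Worked examples (added; illustrative): twin_prime(5) -> 7 since 5 and 7 are both prime,
# while twin_prime(4) -> -1 (4 is composite) and twin_prime(13) -> -1 (15 = 3 * 5).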
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified

        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
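# --- Illustrative sketch (added; usage assumed, not part of the original module) ---
# With a tokenizer, the dummy batch falls back to the fixed ONNX dimensions (batch 2,
# sequence 8) whenever the requested axes are dynamic (-1), and the tokenizer output is
# renamed from "input_ids" to the Perceiver's generic "inputs" key:
#
#   onnx_config = PerceiverOnnxConfig(PerceiverConfig())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer)  # `tokenizer` assumed in scope
#   dummy["inputs"].shape  # -> (2, padded sequence length)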
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
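    # Note (added for clarity; not in the original test): with num_inference_steps=10 and
    # strength=0.2, the img2img schedule keeps only the last int(10 * 0.2) = 2 denoising
    # steps, which is what keeps this CPU smoke test cheap.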
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round height/width up to the nearest multiple of scale_factor**2, then rescale."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a (1, 3, h, w) float tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
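# Worked check (added; illustrative only): 512 // 8**2 == 8 with no remainder, so
# downscale_height_and_width(512, 512) -> (64, 64); 300 // 64 == 4 with a remainder,
# so it rounds up to 5 and downscale_height_and_width(300, 300) -> (40, 40). Likewise
# prepare_image(Image.new("RGB", (640, 480))) yields a tensor of shape (1, 3, 512, 512).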
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation with Kandinsky 2.2 (unet + DDPM scheduler + MoVQ decoder).
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
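    # Worked example (added; not in the original file): with num_inference_steps=100 and
    # strength=0.3, init_timestep = 30 and t_start = 70, so denoising runs over only the
    # last 30 scheduler timesteps -- a lower strength preserves more of the input image.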
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"""
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                    f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"""Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"""
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Pipeline that generates the same prompt with Stable Diffusion checkpoints v1.1-v1.4
    so the four results can be compared side by side.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def _compare(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
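# Usage sketch (added; loading flow is illustrative, the custom_pipeline id is assumed):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   output = pipe._compare("an astronaut riding a horse", num_inference_steps=25)
#   v1_1, v1_2, v1_3, v1_4 = output.images  # one image per checkpoint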
from math import factorial, radians


def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with `accuracy` terms of the Maclaurin series."""
    # Wrap the angle into [0, 360) so the series stays numerically well behaved
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians  # the first series term is x itself
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
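# Worked check (added; illustrative): maclaurin_sin(30) should return ~0.5 and
# maclaurin_sin(90) ~1.0; with 18 terms the truncation error is far below the
# 10-decimal rounding applied by the function.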
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of FlaxUNet2DConditionModel: the predicted sample tensor."""

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
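# Minimal init sketch (added; assumes jax/flax are installed, shapes illustrative):
#   model = FlaxUNet2DConditionModel(sample_size=32, attention_head_dim=8)
#   params = model.init_weights(jax.random.PRNGKey(0))
#   out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)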
def infix_2_postfix(infix):
    """Convert an infix expression to postfix, printing the shunting-yard trace."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert an infix expression to prefix via the reversed-postfix trick."""
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
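# Worked example (added; traced by hand): for the infix "a+b*c" the shunting-yard pass
# yields the postfix "abc*+"; infix_2_prefix reverses the string (swapping brackets),
# converts "c*b+a" to the postfix "cb*a+", and reverses again to give the prefix "+a*bc".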
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Tuple = frozenset([] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
SCREAMING_SNAKE_CASE : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Dict = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
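# The `get_dummy_inputs` helper above leans on a common seeding pattern:
# the MPS backend does not support device-local `torch.Generator` objects,
# so tests fall back to seeding the global RNG there. A minimal sketch of
# that pattern (the helper name is illustrative, not from the source):

import torch


def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # MPS: seed the global RNG; torch.manual_seed returns the default generator
        return torch.manual_seed(seed)
    # CPU/CUDA: an isolated, device-local generator keeps tests deterministic
    return torch.Generator(device=device).manual_seed(seed)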
| 313
| 1
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
SCREAMING_SNAKE_CASE : str = scheduler
SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
SCREAMING_SNAKE_CASE : List[str] = GradientState()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
for _ in range(_lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) ->List[str]:
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
self.scheduler.load_state_dict(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
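# A standalone sketch of the core idea in the wrapper above (which appears
# to mirror accelerate's `AcceleratedScheduler`): when batches are not
# split across processes, the effective batch size is multiplied by
# `num_processes`, so the learning-rate schedule must advance that many
# steps per optimizer step. The toy model and schedule are illustrative.

import torch

model = torch.nn.Linear(2, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 1.0 / (step + 1))

num_processes = 4  # stand-in for AcceleratorState().num_processes
for _ in range(3):  # three "training" steps
    optimizer.step()
    for _ in range(num_processes):  # one scheduler step per process
        scheduler.step()
print(scheduler.get_last_lr())  # LR has decayed as if 12 steps were taken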
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints is fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we never mutate the self.constraints
# objects throughout this process, so they are still at their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
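# A small usage sketch for the phrasal constraint above, assuming it
# mirrors `transformers`' public `PhrasalConstraint`. Feeding it the exact
# token sequence fulfills the constraint; any other token resets progress.

from transformers import PhrasalConstraint

constraint = PhrasalConstraint([5, 9, 7])
print(constraint.advance())  # 5 -- the next required token id
stepped, completed, reset = constraint.update(5)
stepped, completed, reset = constraint.update(9)
print(constraint.remaining())  # 1 -- one token left to fulfill
stepped, completed, reset = constraint.update(7)
print(completed)  # True -- the whole phrase has been matched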
| 313
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 2**24 # 16 MB
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
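# A quick worked example of the byte arithmetic the streaming helpers
# above rely on. For 16 kHz mono "f32le" audio each sample is 4 bytes, so
# a 2.0 s chunk with 0.25 s strides maps to the byte counts below (the
# concrete durations are illustrative, not from the source).

sampling_rate = 16_000
size_of_sample = 4  # f32le -> 4 bytes per sample
chunk_length_s, stride_length_s = 2.0, 0.25

chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
stride = int(round(sampling_rate * stride_length_s)) * size_of_sample
assert chunk_len == 128_000 and stride == 16_000
assert stride + stride < chunk_len  # strides must stay strictly inside a chunk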
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase_( a__=32 , a__=10 , a__=100 , a__=1_026 , a__=True , a__="data/tokenized_stories_train_wikitext103.jbl" , a__="igf_context_pairs.jbl" , ):
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = generate_datasets(
a__ , a__ , number=a__ , min_len=1_026 , trim=a__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE : str = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
SCREAMING_SNAKE_CASE : Dict = load_gpta('''gpt2''' ).to(a__ )
print('''computing perplexity on objective set''' )
SCREAMING_SNAKE_CASE : int = compute_perplexity(a__ , a__ , a__ ).item()
print('''perplexity on objective set:''' , a__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase_( a__ , a__=15 , a__=128 , a__=100 , a__="igf_model.pt" , ):
"""simple docstring"""
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE : str = SecondaryLearner(a__ )
# Train secondary learner
SCREAMING_SNAKE_CASE : Union[str, Any] = train_secondary_learner(
a__ , a__ , max_epochs=a__ , batch_size=a__ , eval_freq=100 , igf_model_path=a__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase_( a__ , a__ , a__ , a__=32 , a__=1_000 , a__=16 , a__=1.0 , a__=recopy_gpta , a__=None , a__=10 , a__="gpt2_finetuned.pt" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE : Optional[int] = RandomSampler(a__ )
SCREAMING_SNAKE_CASE : Dict = DataLoader(a__ , sampler=a__ )
SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(a__ )) + 1
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = recopy_model(a__ , a__ , a__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a__ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
for epoch in range(int(a__ ) ):
for step, example in enumerate(a__ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a__ , labels=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE : List[str] = secondary_learner.forward(
torch.tensor(a__ , dtype=torch.long , device=a__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE : Dict = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE : str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE : List[str] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE : Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE : str = compute_perplexity(a__ , a__ , a__ )
test_perps.append(a__ )
print('''Test perplexity, step''' , a__ , ''':''' , a__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a__ , type=a__ , required=a__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a__ , type=a__ , required=a__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=a__ , default=a__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=a__ , default=a__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=a__ , type=a__ , required=a__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=a__ , type=a__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=a__ , default=a__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=a__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=a__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=a__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=a__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=a__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=a__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=a__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=a__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=a__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=a__ , type=a__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=a__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=a__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=a__ , type=a__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=a__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE : List[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
SCREAMING_SNAKE_CASE : Tuple = training_secondary_learner(
a__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE : Optional[Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=a__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a__ , a__ , a__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=a__ , secondary_learner=a__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
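# A stripped-down sketch of the filtering decision inside the fine-tuning
# loop above: the secondary learner predicts an information-gain score for
# each context, and contexts scoring below a threshold are skipped. The
# step-10 relaxation paraphrases the loop's `global_step == 10` branch,
# and the default threshold stands in for the `--threshold` argument.

def keep_context(predicted_ig: float, global_step: int, threshold: float = 1.0) -> bool:
    if global_step >= 10:  # after the warm-up batches, relax the filter
        threshold = -1.0
    return predicted_ig >= threshold


print(keep_context(0.5, global_step=3))  # False -- filtered out early on
print(keep_context(0.5, global_step=12))  # True  -- filter relaxed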
| 313
| 1
|
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
if n == 0:
return 0
SCREAMING_SNAKE_CASE : Optional[int] = float('''-inf''' )
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE : str = max(
a__ , prices[i - 1] + naive_cut_rod_recursive(n - i , a__ ) )
return max_revue
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(a__ , a__ , a__ )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
SCREAMING_SNAKE_CASE : Dict = float('''-inf''' )
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE : Optional[Any] = max(
a__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , a__ , a__ ) , )
SCREAMING_SNAKE_CASE : Tuple = max_revenue
return max_rev[n]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
_enforce_args(a__ , a__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
SCREAMING_SNAKE_CASE : int = [float('''-inf''' ) for _ in range(n + 1 )]
SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE : Any = max_rev[i]
for j in range(1 , i + 1 ):
SCREAMING_SNAKE_CASE : Any = max(a__ , prices[j - 1] + max_rev[i - j] )
SCREAMING_SNAKE_CASE : Optional[int] = max_revenue_i
return max_rev[n]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if n < 0:
SCREAMING_SNAKE_CASE : int = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(a__ )
if n > len(a__ ):
SCREAMING_SNAKE_CASE : Dict = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(a__ )}"""
)
raise ValueError(a__ )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [6, 10, 12, 15, 20, 23]
SCREAMING_SNAKE_CASE : Tuple = len(a__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
SCREAMING_SNAKE_CASE : Tuple = 36
SCREAMING_SNAKE_CASE : Union[str, Any] = top_down_cut_rod(a__ , a__ )
SCREAMING_SNAKE_CASE : str = bottom_up_cut_rod(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = naive_cut_rod_recursive(a__ , a__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
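# The two dynamic-programming variants above can also be expressed with
# `functools.lru_cache` doing the memoization; this equivalent sketch is
# illustrative and not part of the source.

from functools import lru_cache


def cut_rod(prices, n: int) -> int:
    @lru_cache(maxsize=None)
    def best(length: int) -> int:
        if length == 0:
            return 0
        return max(prices[i - 1] + best(length - i) for i in range(1, length + 1))

    return best(n)


assert cut_rod([6, 10, 12, 15, 20, 23], 6) == 36  # six unit pieces, 6 * 6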
| 313
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda a__ : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
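# The checkpoint factory above boils down to mapping a metric name onto a
# filename template and a monitor key. A hedged standalone equivalent for
# the "rouge2" case (directory and metric names are illustrative):

from pytorch_lightning.callbacks import ModelCheckpoint

checkpoint_callback = ModelCheckpoint(
    dirpath="checkpoints/",
    filename="{val_avg_rouge2:.4f}-{step_count}",  # filled in from logged metrics
    monitor="val_rouge2",
    mode="max",  # the early-stopping helper above flips to "min" when monitoring loss
    save_top_k=1,
    every_n_epochs=1,
)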
| 313
| 1
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
a__ : List[str] = logging.getLogger(__name__)
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
__SCREAMING_SNAKE_CASE : int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=a__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'A csv or a json file containing the training data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=a__ , metadata={'help': 'A csv or a json file containing the test data.'} )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
SCREAMING_SNAKE_CASE : Dict = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
SCREAMING_SNAKE_CASE : Tuple = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = field(
default=a__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=a__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
SCREAMING_SNAKE_CASE : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(a__ )
datasets.utils.logging.set_verbosity(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
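# A minimal invocation sketch (flag values are illustrative assumptions, not part
# of this script):
#
#   python run_tabfact.py --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --do_train --do_eval --output_dir ./tabfact_out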
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
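# For example (illustrative): ord("中") == 0x4E2D lies in the 0x4_E00-0x9_FFF block
# above, so _is_chinese_char(0x4E2D) is True, while ord("a") == 0x61 matches no
# block and returns False.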
def is_chinese(word):
    """Returns 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collects the multi-character Chinese words that appear in `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefixes BERT sub-tokens that continue a whole Chinese word with `##`."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
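# Illustrative walk-through (inputs are made up): with bert_tokens
# ["中", "国", "人"] and chinese_word_set {"中国"}, the longest match "中国" starts
# at index 0, so the continuation piece is rewritten and the result is
# ["中", "##国", "人"].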
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Builds, for every line, the positions of sub-tokens that continue a whole Chinese word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese sub-words starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
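# Sketch of the expected output file (illustrative): one JSON list per input
# line, holding the token positions that continue a whole Chinese word,
# e.g. "[2, 5, 6]".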
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    """Solves for whichever of conductivity, electron concentration, or mobility is passed as 0."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
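    # Quick sanity check with illustrative values: conductivity and electron
    # concentration are given, so the missing quantity is the mobility,
    # 25 / (100 * ELECTRON_CHARGE).
    print(electric_conductivity(conductivity=25, electron_conc=100, mobility=0))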
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decodes raw audio bytes into a mono float32 array at `sampling_rate` using ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Streams raw microphone audio through ffmpeg, yielding byte chunks of `chunk_length_s` seconds."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le"):
    """Like `ffmpeg_microphone`, but yields overlapping numpy chunks with stride information."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunks a byte iterator into `chunk_len`-byte chunks with `stride` bytes of overlap on each side."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper that runs `ffmpeg_command` and yields its stdout in `buflen`-byte reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
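# Minimal usage sketch (assumes ffmpeg on PATH and a working microphone; the
# consumer below is hypothetical, not part of this module):
#
#     for chunk in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=2.0):
#         handle_audio(chunk["raw"])  # np.float32 samples plus "stride"/"partial" info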
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sorts a sequence of 0s, 1s and 2s in a single pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
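# Worked example (illustrative): dutch_national_flag_sort([2, 0, 1, 0, 2]) returns
# [0, 0, 1, 2, 2] in a single pass, keeping 0s left of `low`, 2s right of `high`,
# and 1s in between.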
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Union[str, Any] = input('''Enter numbers separated by commas:\n''').strip()
a__ : Union[str, Any] = [int(item.strip()) for item in user_input.split(''',''')]
print(F"{dutch_national_flag_sort(unsorted)}")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solves for whichever of inductance, frequency, or inductive reactance is passed as 0."""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
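# Example (illustrative values): ind_reactance(inductance=0, frequency=60, reactance=50)
# solves for the inductance and returns {"inductance": 50 / (2 * pi * 60)}, about 0.1326 H.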
if __name__ == "__main__":
import doctest
doctest.testmod()
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Hashes source lines after stripping comments and empty lines (used for caching)."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
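# Illustration (derived from the logic above): comments and empty lines are
# dropped before hashing, so _hash_python_lines(["x = 1# c", "", "y = 2"]) equals
# _hash_python_lines(["x = 1", "y = 2"]); note that whitespace left before a
# stripped comment would still change the digest.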
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
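    # Illustrative behaviour (not a doctest): with padding_side == "right", a
    # global_attention_mask of [1, 0, 0] padded alongside inputs of length 5
    # becomes [1, 0, 0, -1, -1]; -1 marks plain local attention.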
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Optional[Any] = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Dict = 3
SCREAMING_SNAKE_CASE : str = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.ne(1 ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : int = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Tuple = 3
SCREAMING_SNAKE_CASE : Optional[Any] = '''single_label_classification'''
SCREAMING_SNAKE_CASE : List[str] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : str = input_ids.ne(1 ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = 3
SCREAMING_SNAKE_CASE : Optional[int] = '''multi_label_classification'''
SCREAMING_SNAKE_CASE : Optional[Any] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE : Any = input_ids.ne(1 ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE : Optional[int] = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def __lowerCAmelCase ( self ) ->Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE : List[Any] = OpenLlamaModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
SCREAMING_SNAKE_CASE : Any = original_model(_lowerCamelCase ).last_hidden_state
SCREAMING_SNAKE_CASE : List[str] = original_model(_lowerCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''type''': scaling_type, '''factor''': 1_0.0}
SCREAMING_SNAKE_CASE : Optional[int] = OpenLlamaModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = scaled_model(_lowerCamelCase ).last_hidden_state
SCREAMING_SNAKE_CASE : int = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
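# The parameterized RoPE-scaling test above drives the model with a config
# entry of this shape (a sketch): "linear" rescales every position by 1/factor,
# while "dynamic" leaves inputs shorter than max_position_embeddings untouched,
# which is exactly what the allclose/assertFalse branches distinguish.
_rope_scaling_example = {"type": "linear", "factor": 10.0}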
| 313
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
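# Quick worked check of the 2x2 base case:
# [[1, 2], [3, 4]] @ [[5, 6], [7, 8]]
#   = [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]] = [[19, 22], [43, 50]]
assert default_matrix_multiplication([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [
    [19, 22],
    [43, 50],
]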
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally sized matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally sized matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (rows, columns) of ``matrix``."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Pretty-print a matrix, one row per line."""
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for square power-of-two matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of arbitrary compatible size via Strassen's
    algorithm, zero-padding them up to the next power of two."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
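    # Sanity sketch: cross-check Strassen against the naive triple loop on small
    # fresh inputs (strassen pads its arguments in place, so copies are passed).
    mat_a = [[1, 2, 3, 4], [5, 6, 7, 8]]  # 2 x 4
    mat_b = [[1, 0], [0, 1], [1, 1], [2, 2]]  # 4 x 2
    naive = [
        [sum(mat_a[i][k] * mat_b[k][j] for k in range(4)) for j in range(2)]
        for i in range(2)
    ]
    assert strassen([row[:] for row in mat_a], [row[:] for row in mat_b]) == naive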
| 313
| 1
|
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0) through C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
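# Quick check against the well-known values C(0)..C(5):
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]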
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F"The Catalan numbers from 0 through {N} are:")
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 313
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
SCREAMING_SNAKE_CASE : str = scheduler
SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
SCREAMING_SNAKE_CASE : List[str] = GradientState()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
for _ in range(_lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) ->List[str]:
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
self.scheduler.load_state_dict(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
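# Minimal sketch of the stepping contract the wrapper above enforces, in plain
# PyTorch (no Accelerate; model, optimizer and hyperparameters are illustrative):
# the LR scheduler only advances after the optimizer actually stepped.
import torch

_model = torch.nn.Linear(4, 4)
_opt = torch.optim.SGD(_model.parameters(), lr=0.1)
_sched = torch.optim.lr_scheduler.StepLR(_opt, step_size=2, gamma=0.5)

for _ in range(4):
    _opt.zero_grad()
    _model(torch.randn(8, 4)).sum().backward()
    _opt.step()
    _sched.step()  # the wrapper adds skip detection and per-process replication around this call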
| 313
| 1
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the number of trainable parameters in ``model``."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
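# Quick check on a small module: torch.nn.Linear(10, 5) has 10 * 5 weights plus
# 5 biases, i.e. 55 trainable parameters.
assert count_trainable_parameters(torch.nn.Linear(10, 5)) == 55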
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"""
            ''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
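# Sketch: wiring the two callback factories above into a Lightning trainer
# (the names get_checkpoint_callback / get_early_stopping_callback and the
# output_dir/patience values are assumptions for illustration):
#   checkpoint_cb = get_checkpoint_callback("outputs", metric="rouge2")
#   es_cb = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint_cb, es_cb])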
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 313
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    """Rewrite one TF checkpoint key into the matching HF state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
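# Worked example of the renaming pass on one illustrative TF key:
# memory_attention -> encoder_attn, / -> ., output_proj -> out_proj, kernel -> weight.
assert (
    rename_state_dict_key("encoder/memory_attention/output_proj/kernel")
    == "encoder.encoder_attn.out_proj.weight"
)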
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
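    # Example invocation (a sketch; the script filename and checkpoint path are
    # illustrative):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
    # When save_dir is None, it is derived above as pegasus/<dataset>.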
| 313
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=[1, 1, 2] , _lowerCamelCase=1 , _lowerCamelCase=32 , _lowerCamelCase=4 , _lowerCamelCase=8 , _lowerCamelCase=37 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=512 , _lowerCamelCase=3 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , _lowerCamelCase=False , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : List[Any] = block_sizes
SCREAMING_SNAKE_CASE : Dict = num_decoder_layers
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : Dict = n_head
SCREAMING_SNAKE_CASE : int = d_head
SCREAMING_SNAKE_CASE : Dict = d_inner
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : str = hidden_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE : Any = activation_dropout
SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE : Any = type_vocab_size
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : Optional[int] = scope
SCREAMING_SNAKE_CASE : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE : Any = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE : int = self.num_hidden_layers + 2
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : str = TFFunnelModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Any:
SCREAMING_SNAKE_CASE : str = TFFunnelBaseModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelBaseModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Any = TFFunnelBaseModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = TFFunnelForPreTraining(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : str = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->List[str]:
SCREAMING_SNAKE_CASE : List[str] = TFFunnelForMaskedLM(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Any = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForSequenceClassification(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE : Optional[int] = TFFunnelForMultipleChoice(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : str = tf.tile(tf.expand_dims(_lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : int = TFFunnelForTokenClassification(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = TFFunnelForQuestionAnswering(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) ->List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a_ ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__SCREAMING_SNAKE_CASE : int = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
@require_tf
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelModelTester(self , base=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
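# Note on the pooled shapes asserted in create_and_check_base_model above:
# TFFunnelBaseModel halves the sequence length between blocks, so a length-7
# input pools down to 2 or 3 positions depending on configuration. The two bare
# `= False` assignments in that method toggle config flags (assumed here to be
# separate_cls and truncate_seq) that control this pooled length.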
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
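# Sketch of the two-stage Kandinsky flow the slow test exercises: the prior
# pipeline maps text to image embeddings, which the img2img pipeline consumes
# together with an init image (model ids as in the test; needs a GPU and
# downloaded weights, hence comments only):
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   image_emb, negative_emb = prior("A red cartoon frog, 4k").to_tuple()
#   pipe = KandinskyImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   frog = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_emb,
#               negative_image_embeds=negative_emb, strength=0.2).images[0]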
| 313
| 1
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a__ : Any = logging.get_logger(__name__)
class a_ ( enum.Enum ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Tuple = 1
@add_end_docstrings(a__ )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = 'generated'
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ) ->str:
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowerCAmelCase ( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Any = {}
if truncation is not None:
SCREAMING_SNAKE_CASE : int = truncation
SCREAMING_SNAKE_CASE : Any = generate_kwargs
SCREAMING_SNAKE_CASE : Tuple = {}
if return_tensors is not None and return_type is None:
SCREAMING_SNAKE_CASE : Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
SCREAMING_SNAKE_CASE : Dict = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE : Any = self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
if len(_lowerCamelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
SCREAMING_SNAKE_CASE : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->str:
return True
def __lowerCAmelCase ( self , *_lowerCamelCase , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : Any = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _lowerCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
SCREAMING_SNAKE_CASE : List[str] = ([prefix + arg for arg in args[0]],)
SCREAMING_SNAKE_CASE : Tuple = True
elif isinstance(args[0] , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = (prefix + args[0],)
SCREAMING_SNAKE_CASE : Dict = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
SCREAMING_SNAKE_CASE : int = self.tokenizer(*_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : int = super().__call__(*_lowerCamelCase , **_lowerCamelCase )
if (
isinstance(args[0] , _lowerCamelCase )
and all(isinstance(_lowerCamelCase , _lowerCamelCase ) for el in args[0] )
and all(len(_lowerCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=TruncationStrategy.DO_NOT_TRUNCATE , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = self._parse_and_tokenize(_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase )
return inputs
def __lowerCAmelCase ( self , _lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if self.framework == "pt":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = tf.shape(model_inputs['''input_ids'''] ).numpy()
SCREAMING_SNAKE_CASE : List[Any] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
SCREAMING_SNAKE_CASE : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(_lowerCamelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
SCREAMING_SNAKE_CASE : Optional[int] = self.model.generate(**_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = output_ids.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE : Union[str, Any] = output_ids.reshape(_lowerCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE : List[str] = tf.reshape(_lowerCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=ReturnType.TEXT , _lowerCamelCase=False ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE : Dict = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
SCREAMING_SNAKE_CASE : Tuple = {
F"""{self.return_name}_text""": self.tokenizer.decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase , )
}
records.append(_lowerCamelCase )
return records
@add_end_docstrings(a__ )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = 'summary'
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->bool:
if max_length < min_length:
            logger.warning(F"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(a__ )
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = 'translation'
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Any:
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __lowerCAmelCase ( self , *_lowerCamelCase , _lowerCamelCase=TruncationStrategy.DO_NOT_TRUNCATE , _lowerCamelCase=None , _lowerCamelCase=None ) ->Optional[int]:
if getattr(self.tokenizer , '''_build_translation_inputs''' , _lowerCamelCase ):
return self.tokenizer._build_translation_inputs(
*_lowerCamelCase , return_tensors=self.framework , truncation=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase )
else:
return super()._parse_and_tokenize(*_lowerCamelCase , truncation=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = super()._sanitize_parameters(**_lowerCamelCase )
if src_lang is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = src_lang
if tgt_lang is not None:
SCREAMING_SNAKE_CASE : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.get('''task''' , self.task )
SCREAMING_SNAKE_CASE : int = task.split('''_''' )
if task and len(_lowerCamelCase ) == 4:
# translation, XX, to YY
SCREAMING_SNAKE_CASE : Any = items[1]
SCREAMING_SNAKE_CASE : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) ->str:
return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
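# Usage sketch for the task pipelines defined above (standard transformers
# entry point; default models are resolved per task):
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("A very long article ...", max_length=56, min_length=8)
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")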
| 313
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_( a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Any = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
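# A self-contained sketch of the fused-qkv split performed above: timm stores the
# query, key and value projections stacked in one (3 * hidden, hidden) matrix, and
# the slices below recover them (the hidden size here is an illustrative assumption).
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_w = in_proj_weight[: hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size :, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)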
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ViTMSNConfig()
SCREAMING_SNAKE_CASE : Optional[int] = 1_000
SCREAMING_SNAKE_CASE : str = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[Any] = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(a__ )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , base_model=a__ )
model.load_state_dict(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw )
SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Tuple = model(**a__ )
SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
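# Hedged usage sketch: the script filename and output directory below are
# placeholders/assumptions, only the checkpoint URL comes from the default above.
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small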
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = '''Hello, World!'''
a__ : Union[str, Any] = '''en_XX'''
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = Path('''data_bin''' )
SCREAMING_SNAKE_CASE : Any = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(a__ ).parent ) , checkpoint_file=Path(a__ ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(a__ ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(a__ ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(a__ )
SCREAMING_SNAKE_CASE : str = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE : str = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , a__ )
SCREAMING_SNAKE_CASE : Optional[int] = XmodForSequenceClassification(a__ ) if classification_head else XmodForMaskedLM(a__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE : Optional[int] = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE : Any = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE : int = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE : Optional[Any] = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE : List[Any] = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE : Any = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
SCREAMING_SNAKE_CASE : str = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE : Tuple = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE : Tuple = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
SCREAMING_SNAKE_CASE : int = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE : Optional[int] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
SCREAMING_SNAKE_CASE : List[str] = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE : Tuple = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE : Union[str, Any] = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE : Dict = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE : Any = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE : Optional[Any] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE : Optional[Any] = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE : int = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE : int = from_adapter.fca.weight
SCREAMING_SNAKE_CASE : Dict = from_adapter.fca.bias
SCREAMING_SNAKE_CASE : Tuple = from_adapter.fca.weight
SCREAMING_SNAKE_CASE : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE : int = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE : List[str] = xmod.model.classification_heads['''mnli'''].dense.weight
SCREAMING_SNAKE_CASE : Dict = xmod.model.classification_heads['''mnli'''].dense.bias
SCREAMING_SNAKE_CASE : str = xmod.model.classification_heads['''mnli'''].out_proj.weight
SCREAMING_SNAKE_CASE : Optional[int] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE : Dict = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE : Any = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE : List[str] = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE : Tuple = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE : Dict = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE : List[str] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE : Optional[int] = xmod.encode(a__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(a__ )
SCREAMING_SNAKE_CASE : Tuple = model(a__ )[0]
if classification_head:
SCREAMING_SNAKE_CASE : List[Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(a__ ) )
else:
SCREAMING_SNAKE_CASE : List[Any] = xmod.model(a__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE : str = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE : Any = torch.allclose(a__ , a__ , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(a__ ).mkdir(parents=a__ , exist_ok=a__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
a__ : Dict = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
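# A toy illustration of the closeness check used above to compare the two model
# outputs (the tensor values are made up for the example):
import torch

ours = torch.tensor([1.0, 2.0, 3.0])
theirs = ours + 1e-4
max_absolute_diff = torch.max(torch.abs(ours - theirs)).item()
assert max_absolute_diff < 1e-3 and torch.allclose(ours, theirs, atol=1e-3)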
import csv
import tweepy
# Twitter API credentials
a__ : Union[str, Any] = ''''''
a__ : List[str] = ''''''
a__ : Any = ''''''
a__ : List[str] = ''''''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ )
auth.set_access_token(a__ , a__ )
SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 )
# save most recent tweets
alltweets.extend(a__ )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(a__ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE : Any = api.user_timeline(
screen_name=a__ , count=200 , max_id=a__ )
# save most recent tweets
alltweets.extend(a__ )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1
print(F"""...{len(a__ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(a__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
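# The download loop above pages backwards through the timeline via `max_id`. A
# library-free sketch of the same pattern over a fake timeline (all values are
# illustrative; no Twitter API involved):
def _fake_timeline(count, max_id=None):
    ids = [i for i in range(100, 0, -1) if max_id is None or i <= max_id]
    return ids[:count]

collected = []
batch = _fake_timeline(200)
while len(batch) > 0:
    collected.extend(batch)
    batch = _fake_timeline(200, max_id=collected[-1] - 1)
assert collected == list(range(100, 0, -1))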
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BioGptTokenizer
__SCREAMING_SNAKE_CASE : List[str] = False
def __lowerCAmelCase ( self ) ->Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE : List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
SCREAMING_SNAKE_CASE : Any = '''lower newer'''
SCREAMING_SNAKE_CASE : int = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = BioGptTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE : List[str] = '''lower'''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''low''', '''er</w>''']
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = tokens + ['''<unk>''']
SCREAMING_SNAKE_CASE : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
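# A tiny sketch of how the merges defined in `setUp` tokenize "lower": start from
# characters and apply each merge in priority order. This re-implements the idea
# only; the real tokenizer handles merge ranks and end-of-word markers itself.
def _apply_merges(symbols, merges):
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols = symbols[:i] + [a + b] + symbols[i + 2 :]
            else:
                i += 1
    return symbols

assert _apply_merges(["l", "o", "w", "e", "r</w>"], [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]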
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
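# Hedged usage sketch (assumes the classes above correspond to transformers'
# IBertConfig / IBertModel; `quant_mode` toggles integer-only inference):
# from transformers import IBertConfig, IBertModel
# config = IBertConfig(quant_mode=True)
# model = IBertModel(config)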
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
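# A minimal sketch of the dummy-image generation behind `generate_dummy_inputs`
# above (batch=1 and 3-channel 32x32 are the defaults here; producing PIL images
# is an assumption about the helper's output type):
import numpy as np
from PIL import Image

batch_size, num_channels, height, width = 1, 3, 32, 32
dummy_images = [
    Image.fromarray((np.random.rand(height, width, num_channels) * 255).astype(np.uint8))
    for _ in range(batch_size)
]
assert dummy_images[0].size == (32, 32)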
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
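# A minimal jax.numpy sketch of the timestep normalization at the top of
# `__call__` above: a 0-d scalar is promoted to a (batch,) array before the
# sinusoidal embedding (illustrative; assumes jax is installed):
import jax.numpy as jnp

timesteps = jnp.array(7.0)  # 0-d scalar
if len(timesteps.shape) == 0:
    timesteps = jnp.expand_dims(timesteps, 0)
assert timesteps.shape == (1,)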
from maths.prime_check import is_prime
def UpperCAmelCase_( number ):
"""simple docstring"""
if not isinstance(number , int ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE )
if is_prime(number ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
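# A dependency-free cross-check of the twin-prime logic above (illustrative;
# `_is_prime` is a stand-in for maths.prime_check.is_prime):
def _is_prime(n):
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(n ** 0.5) + 1))

assert _is_prime(5) and _is_prime(7)  # 5 -> 7 is a twin-prime pair
assert not _is_prime(8)               # 8 is not prime, so the function returns -1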
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
from collections import defaultdict
from math import ceil, sqrt
def UpperCAmelCase_( t_limit = 1_000_000 , n_limit = 10 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : defaultdict = defaultdict(int )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
SCREAMING_SNAKE_CASE : List[str] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
SCREAMING_SNAKE_CASE : List[str] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
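# A hedged usage sketch for the comparison pipeline defined above. Community
# pipelines are normally loaded through `DiffusionPipeline.from_pretrained` with
# the `custom_pipeline` argument; the pipeline name "stable_diffusion_comparison"
# is an assumption based on this file, and loading four full checkpoints needs a
# large amount of memory, so treat this as illustrative only.
import torch
from diffusers import DiffusionPipeline  # repeated import, kept for a self-contained sketch

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
).to("cuda")
pipe.enable_attention_slicing()
output = pipe(prompt="an astronaut riding a horse", height=512, width=512, num_inference_steps=25)
images = output.images  # one image per checkpoint, v1.1 through v1.4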
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
a__ : str = '''
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class a_ ( a__ ):
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_lowerCamelCase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_lowerCamelCase , default=_lowerCamelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_lowerCamelCase )
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , ) ->List[str]:
SCREAMING_SNAKE_CASE : str = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"""Loading model {model_type}""" )
SCREAMING_SNAKE_CASE : Optional[Any] = model_type
SCREAMING_SNAKE_CASE : List[Any] = tf_checkpoint
SCREAMING_SNAKE_CASE : Union[str, Any] = pytorch_dump_output
SCREAMING_SNAKE_CASE : Optional[int] = config
SCREAMING_SNAKE_CASE : Union[str, Any] = finetuning_task_name
def __lowerCAmelCase ( self ) ->Union[str, Any]:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowerCamelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE : Optional[Any] = self._tf_checkpoint
SCREAMING_SNAKE_CASE : int = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[Any] = self._tf_checkpoint
SCREAMING_SNAKE_CASE : Tuple = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_lowerCamelCase , self._config , self._pytorch_dump_output , _lowerCamelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCamelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
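# Example invocation of the command registered above (paths are placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model/model.ckpt \
#       --config ./bert_model/bert_config.json \
#       --pytorch_dump_output ./bert_model/pytorch_model.bin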
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__SCREAMING_SNAKE_CASE : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
__SCREAMING_SNAKE_CASE : Tuple[int] = (320, 640, 1280, 1280)
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
__SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
__SCREAMING_SNAKE_CASE : int = 1280
__SCREAMING_SNAKE_CASE : float = 0.0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : bool = False
def __lowerCAmelCase ( self , _lowerCamelCase ) ->FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : List[str] = self.block_out_channels
SCREAMING_SNAKE_CASE : Optional[int] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE : List[str] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE : Dict = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : int = block_out_channels[i]
SCREAMING_SNAKE_CASE : List[Any] = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = down_blocks
# mid
SCREAMING_SNAKE_CASE : int = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : str = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = list(reversed(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
SCREAMING_SNAKE_CASE : Dict = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
SCREAMING_SNAKE_CASE : str = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = output_channel
SCREAMING_SNAKE_CASE : Tuple = up_blocks
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
SCREAMING_SNAKE_CASE : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
SCREAMING_SNAKE_CASE : int = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE : List[str] = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.expand_dims(_lowerCamelCase , 0 )
SCREAMING_SNAKE_CASE : List[str] = self.time_proj(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.time_embedding(_lowerCamelCase )
# 2. pre-process
SCREAMING_SNAKE_CASE : int = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_in(_lowerCamelCase )
# 3. down
SCREAMING_SNAKE_CASE : Optional[int] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
SCREAMING_SNAKE_CASE : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE : Dict = new_down_block_res_samples
# 4. mid
SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
SCREAMING_SNAKE_CASE : Optional[int] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : str = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
SCREAMING_SNAKE_CASE : Optional[int] = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nn.silu(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
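# A minimal init/apply sketch for the conditional UNet above, which mirrors
# diffusers' public `FlaxUNet2DConditionModel`. The tiny config and the dummy
# shapes are assumptions chosen to keep the example cheap; they follow the same
# pattern as the weight-initialization helper in this file.
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=32,
    in_channels=4,
    out_channels=4,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=8,
)
rng = jax.random.PRNGKey(0)
sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)  # (batch, channels, height, width)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 1, 32), dtype=jnp.float32)
params = unet.init(rng, sample, timesteps, encoder_hidden_states)["params"]
out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
assert out.sample.shape == sample.shape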
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = ['pixel_values']
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = 1 / 255 , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ) ->None:
super().__init__(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = size if size is not None else {'''shortest_edge''': 224}
SCREAMING_SNAKE_CASE : Any = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = size
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE : int = crop_size
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor
SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize
SCREAMING_SNAKE_CASE : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE : int = do_convert_rgb
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
SCREAMING_SNAKE_CASE : int = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = get_resize_output_image_size(_lowerCamelCase , size=size['''shortest_edge'''] , default_to_square=_lowerCamelCase )
return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(_lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) ->int:
return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ) ->np.ndarray:
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ) ->PIL.Image.Image:
SCREAMING_SNAKE_CASE : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(_lowerCamelCase , param_name='''size''' , default_to_square=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : str = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Dict = get_size_dict(_lowerCamelCase , param_name='''crop_size''' , default_to_square=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE : List[Any] = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE : List[Any] = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(_lowerCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Optional[Any] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Tuple = [self.center_crop(image=_lowerCamelCase , size=_lowerCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : List[Any] = [self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Tuple = [self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE : List[Any] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images]
SCREAMING_SNAKE_CASE : List[str] = {'''pixel_values''': images}
return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
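# A hedged usage sketch: the class above mirrors transformers' public
# `CLIPImageProcessor`, so the equivalent end-to-end call looks like this
# (the random input image is a placeholder):
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype("uint8"))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default size/crop config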
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a_ ( a__ , a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = StableUnCLIPImgaImgPipeline
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Tuple = frozenset([] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 32
SCREAMING_SNAKE_CASE : Tuple = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE : int = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=_lowerCamelCase , projection_dim=_lowerCamelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableUnCLIPImageNormalizer(embedding_dim=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCamelCase , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL()
SCREAMING_SNAKE_CASE : Optional[Any] = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=True ) ->Optional[int]:
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if pil_image:
SCREAMING_SNAKE_CASE : Any = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE : int = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE : List[str] = DiffusionPipeline.numpy_to_pil(_lowerCamelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = StableUnCLIPImgaImgPipeline(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_lowerCamelCase )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_lowerCamelCase ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) ->Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=_lowerCamelCase )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
SCREAMING_SNAKE_CASE : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE : Tuple = pipe(_lowerCamelCase , '''anime turtle''' , generator=_lowerCamelCase , output_type='''np''' )
SCREAMING_SNAKE_CASE : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE : str = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE : Dict = pipe(
_lowerCamelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
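# A hedged usage sketch distilled from the slow tests above (the checkpoint id,
# image URL, and memory-saving calls are taken directly from those tests):
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "anime turtle", output_type="np").images[0]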
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BarthezTokenizer
__SCREAMING_SNAKE_CASE : Dict = BarthezTokenizerFast
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
def __lowerCAmelCase ( self ) ->Dict:
super().setUp()
SCREAMING_SNAKE_CASE : int = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = '''<pad>'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_lowerCamelCase ) , 10_1122 )
def __lowerCAmelCase ( self ) ->List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE : Tuple = [0, 57, 3018, 7_0307, 91, 2]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(
_lowerCamelCase , max_length=len(_lowerCamelCase ) , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''pt''' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Dict = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->Tuple:
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
SCREAMING_SNAKE_CASE : List[str] = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_lowerCamelCase , )
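# A hedged usage sketch for the tokenizer under test (the model id comes from
# the tests above):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)  # (2, sequence_length)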
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of the
            # constraints in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never modify the `self.constraints` objects
        # throughout this process, so they are still in their initialization state
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
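# A minimal usage sketch (hypothetical token ids, using the classes above):
#
#   state = ConstraintListState([DisjunctiveConstraint([[5, 6], [5, 7]])])
#   state.add(5)                      # steps the disjunctive constraint
#   complete, stepped = state.add(6)  # completes one branch; state.completed is True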
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Hugging Face equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free linear layer whose weights are tied to an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
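# Tying the LM head to the shared embedding above mirrors fairseq's behaviour of
# projecting decoder states back onto the vocabulary with the embedding matrix itself.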
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M-100 checkpoint and return an equivalent Hugging Face model."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu",
    )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
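# Example invocation (hypothetical paths and script name):
#   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100-hf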
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
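# Information Gain Filtration (IGF) in a nutshell: a small secondary learner
# predicts how much a training context will improve the language model, and
# only the high-gain contexts are backpropagated during fine-tuning.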
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1_026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs with GPT-2 and save them to `igf_data_file`."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1_026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    # note: the positional argument order below is an assumption based on the reference IGF example script
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
):
    """Train the secondary learner (IGF model) on (context, information gain) pairs."""
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1_000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering batches with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1_000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1_026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpta, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1_026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
    )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1_026, trim=True
    )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1_000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the best model by validation `metric`."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback
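# For example, metric="rouge2" produces checkpoint filenames such as
# "0.4321-00010" via the "{val_avg_rouge2:.4f}-{step_count}" template above.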
def get_early_stopping_callback(metric, patience):
    """Stop training once validation `metric` stops improving for `patience` checks."""
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Build the list of (original key, HF key) pairs for the BLIP-2 state dict."""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
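# Each pair above is (original LAVIS key, Transformers key); `rename_key`
# below moves a tensor from the old name to the new one.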
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
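# The original vision encoder uses a single fused qkv projection whose k bias
# is fixed at zero, hence the concatenation of (q_bias, zeros, v_bias) above.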
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # note: flan-T5 checkpoints don't seem to ship with a properly set bos_token_id, so it is set explicitly below
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
SCREAMING_SNAKE_CASE : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
SCREAMING_SNAKE_CASE : Any = tokenizer('''\n''' , add_special_tokens=a__ ).input_ids[0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = get_blipa_config(a__ , eos_token_id=a__ )
SCREAMING_SNAKE_CASE : int = BlipaForConditionalGeneration(a__ ).eval()
SCREAMING_SNAKE_CASE : Optional[int] = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
SCREAMING_SNAKE_CASE : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
SCREAMING_SNAKE_CASE : List[Any] = original_model.state_dict()
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
SCREAMING_SNAKE_CASE : Any = state_dict.pop(a__ )
if key.startswith('''Qformer.bert''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
SCREAMING_SNAKE_CASE : List[Any] = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
SCREAMING_SNAKE_CASE : List[Any] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
SCREAMING_SNAKE_CASE : Any = key.replace('''t5''' , '''language''' )
SCREAMING_SNAKE_CASE : List[str] = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = hf_model.load_state_dict(a__ , strict=a__ )
assert len(a__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
SCREAMING_SNAKE_CASE : Optional[int] = load_demo_image()
SCREAMING_SNAKE_CASE : Tuple = vis_processors['''eval'''](a__ ).unsqueeze(0 ).to(a__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(a__ )
# create processor
SCREAMING_SNAKE_CASE : Optional[Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=a__ , image_std=a__ )
SCREAMING_SNAKE_CASE : Dict = BlipaProcessor(image_processor=a__ , tokenizer=a__ )
SCREAMING_SNAKE_CASE : List[Any] = processor(images=a__ , return_tensors='''pt''' ).pixel_values.to(a__ )
# make sure processor creates exact same pixel values
assert torch.allclose(a__ , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "opt" in model_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
SCREAMING_SNAKE_CASE : Dict = hf_model(a__ , a__ ).logits
else:
SCREAMING_SNAKE_CASE : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
SCREAMING_SNAKE_CASE : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
SCREAMING_SNAKE_CASE : Any = hf_model(a__ , a__ , labels=a__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=a__ )
assert torch.allclose(logits[0, :3, :3] , a__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=a__ )
else:
# cast to same type
SCREAMING_SNAKE_CASE : Any = logits.dtype
assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
SCREAMING_SNAKE_CASE : List[str] = ''''''
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(a__ , return_tensors='''pt''' ).input_ids.to(a__ )
SCREAMING_SNAKE_CASE : int = original_model.generate({'''image''': original_pixel_values} )
SCREAMING_SNAKE_CASE : List[str] = hf_model.generate(
a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[1]
SCREAMING_SNAKE_CASE : Optional[int] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ )
SCREAMING_SNAKE_CASE : Dict = [text.strip() for text in output_text]
print('''HF generation:''' , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether code point `cp` falls inside a CJK Unicode block."""
    if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
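# For example, ord("中") == 0x4E2D falls inside the first CJK block above,
# while ord("a") does not, so "中" counts as a Chinese character and "a" does not.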
def is_chinese(word):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
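# Worked example: with chinese_word_set={"中国"}, the tokens ["中", "国", "人"]
# become ["中", "##国", "人"], marking "国" as the continuation of a whole word.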
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds tiny DistilBERT configs and inputs for the tests below."""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.2275601, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload, sampling_rate):
    """Decode an in-memory audio payload to float32 PCM through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
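# Sizing note for the f32le output above: mono float32 PCM at 16 kHz means a
# one-second clip decodes to a NumPy array of 16_000 float32 samples.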
def ffmpeg_microphone(
    sampling_rate,
    chunk_length_s,
    format_for_conversion="f32le",
):
    """Stream raw microphone audio through ffmpeg at `sampling_rate`."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate,
    chunk_length_s,
    stream_chunk_s=None,
    stride_length_s=None,
    format_for_conversion="f32le",
):
    """Stream microphone audio as overlapping numpy chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """Re-chunk raw bytes from `iterator` into `chunk_len`-sized pieces with left/right `stride` overlaps."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
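# Striding example: with chunk_len=6 and stride=(2, 2), consecutive chunks
# overlap by 2 bytes on each side, so consumers can drop the strided edges and
# stitch per-chunk results together without boundary artifacts.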
def _ffmpeg_stream(ffmpeg_command, buflen):
    """Internal helper: yield raw stdout reads of size `buflen` from a running ffmpeg process."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class DataaVecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
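
# Hedged note: for the default task, the property above resolves to the
# OrderedDict below — axes 0 and 1 are given names so the exported ONNX graph
# treats batch size and sequence length as dynamic dimensions.
_EXPECTED_DEFAULT_ONNX_INPUTS = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
    ]
)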
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
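
# Hedged illustration of the lazy-import pattern above, as a simplified
# stand-in (this is not the real _LazyModule implementation): attribute access
# triggers the actual import, so importing the package stays cheap until a
# symbol is really needed.
import importlib


class _TinyLazyModule:
    def __init__(self, attr_to_module: dict):
        self._attr_to_module = attr_to_module

    def __getattr__(self, name: str):
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)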
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class a_ :
"""simple docstring"""
@staticmethod
def __lowerCAmelCase ( *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
pass
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a__ : Tuple = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model=_lowerCamelCase , tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , '''''' ) ) )
SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(_lowerCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(_lowerCamelCase , top_k=2 )
self.assertEqual(
_lowerCamelCase , [
[
{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase ), '''start''': ANY(_lowerCamelCase ), '''end''': ANY(_lowerCamelCase )},
{'''score''': ANY(_lowerCamelCase ), '''answer''': ANY(_lowerCamelCase ), '''start''': ANY(_lowerCamelCase ), '''end''': ANY(_lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
SCREAMING_SNAKE_CASE : Optional[Any] = INVOICE_URL
SCREAMING_SNAKE_CASE : Union[str, Any] = '''How many cats are there?'''
SCREAMING_SNAKE_CASE : int = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase )
        # No text is detected in this image, so layoutlmv2 should return an empty answer.
SCREAMING_SNAKE_CASE : Tuple = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(_lowerCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , words=_lowerCamelCase , boxes=_lowerCamelCase , top_k=2 )
self.assertEqual(_lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
SCREAMING_SNAKE_CASE : int = INVOICE_URL
SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE : Optional[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE : Any = INVOICE_URL
SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCamelCase , revision='''3dc6de3''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
SCREAMING_SNAKE_CASE : Optional[Any] = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE : int = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE : int = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE : Any = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCamelCase , revision='''3dc6de3''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
SCREAMING_SNAKE_CASE : List[str] = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE : Union[str, Any] = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
SCREAMING_SNAKE_CASE : Any = INVOICE_URL
SCREAMING_SNAKE_CASE : Union[str, Any] = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __lowerCAmelCase ( self ) ->str:
pass
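
# Hedged usage sketch mirroring the slow tests above; the model id is the one
# pinned in those tests, and the call needs network access, so it is left
# commented out.
# dqa_pipeline = pipeline(
#     "document-question-answering",
#     model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
# )
# dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)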
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
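
# Hedged illustration of the serialization pattern implemented above: a nested
# backbone config is flattened to a plain dict and the model_type re-attached,
# so the composite config round-trips through JSON. Minimal stand-in, not the
# real PretrainedConfig machinery.
def _to_serializable_dict(config_dict, backbone_dict=None):
    output = dict(config_dict)
    if backbone_dict is not None:
        output["backbone_config"] = dict(backbone_dict)
    output["model_type"] = "deformable_detr"
    return output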
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a__ : Any = logging.get_logger(__name__)
a__ : Union[str, Any] = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
a__ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE : int = model_type_to_module_name(a__ )
SCREAMING_SNAKE_CASE : str = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(a__ , a__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(a__ , '''__name__''' , a__ ) == class_name:
return extractor
    # We did not find the class, but it may be because a dependency is missing. In that case, the class will be in
    # the main init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE : str = importlib.import_module('''transformers''' )
if hasattr(a__ , a__ ):
return getattr(a__ , a__ )
return None
def UpperCAmelCase_( a__ , a__ = None , a__ = False , a__ = False , a__ = None , a__ = None , a__ = None , a__ = False , **a__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = get_file_from_repo(
a__ , a__ , cache_dir=a__ , force_download=a__ , resume_download=a__ , proxies=a__ , use_auth_token=a__ , revision=a__ , local_files_only=a__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(a__ , encoding='''utf-8''' ) as reader:
return json.load(a__ )
class a_ :
"""simple docstring"""
def __init__( self ) ->Any:
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_lowerCamelCase )
def __lowerCAmelCase ( cls , _lowerCamelCase , **_lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''config''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''trust_remote_code''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = config_dict.get('''feature_extractor_type''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
            # It could be in `config.feature_extractor_type`
SCREAMING_SNAKE_CASE : Dict = getattr(_lowerCamelCase , '''feature_extractor_type''' , _lowerCamelCase )
if hasattr(_lowerCamelCase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
SCREAMING_SNAKE_CASE : str = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
SCREAMING_SNAKE_CASE : int = feature_extractor_class_from_name(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = feature_extractor_auto_map is not None
SCREAMING_SNAKE_CASE : List[str] = feature_extractor_class is not None or type(_lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING
SCREAMING_SNAKE_CASE : Dict = resolve_trust_remote_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_class_from_dynamic_module(
_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''code_revision''' , _lowerCamelCase )
if os.path.isdir(_lowerCamelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING:
SCREAMING_SNAKE_CASE : List[str] = FEATURE_EXTRACTOR_MAPPING[type(_lowerCamelCase )]
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowerCAmelCase ( _lowerCamelCase , _lowerCamelCase ) ->str:
FEATURE_EXTRACTOR_MAPPING.register(_lowerCamelCase , _lowerCamelCase )
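
# Hedged usage sketch of the resolution path implemented above; the checkpoint
# id is illustrative and the call needs network access, so it is left
# commented out.
# feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")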
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return (-y * np.log(a__ ) - (1 - y) * np.log(1 - h )).mean()
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = np.dot(a__ , a__ )
return np.sum(y * scores - np.log(1 + np.exp(a__ ) ) )
def UpperCAmelCase_( a__ , a__ , a__ , a__=70_000 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = np.zeros(x.shape[1] )
for iterations in range(a__ ):
SCREAMING_SNAKE_CASE : Optional[int] = np.dot(a__ , a__ )
SCREAMING_SNAKE_CASE : Tuple = sigmoid_function(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE : Any = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE : Any = np.dot(a__ , a__ )
SCREAMING_SNAKE_CASE : str = sigmoid_function(a__ )
SCREAMING_SNAKE_CASE : List[str] = cost_function(a__ , a__ )
if iterations % 100 == 0:
print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a__ : int = datasets.load_iris()
a__ : int = iris.data[:, :2]
a__ : int = (iris.target != 0) * 1
a__ : List[Any] = 0.1
a__ : Any = logistic_reg(alpha, x, y, max_iterations=70_000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return sigmoid_function(
np.dot(a__ , a__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((a__) , (a__)) : List[Any] = (x[:, 0].min(), x[:, 0].max())
((a__) , (a__)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((a__) , (a__)) : Tuple = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a__ : List[str] = np.c_[xxa.ravel(), xxa.ravel()]
a__ : int = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
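
# Hedged numeric check of the pieces above on tiny hand-made values
# (illustrative only): sigmoid(0) must be exactly 0.5, and the cross-entropy
# cost must be positive for imperfect predictions.
_z_check = np.array([0.0, 2.0])
_h_check = 1 / (1 + np.exp(-_z_check))
assert abs(_h_check[0] - 0.5) < 1e-12
_y_check = np.array([1.0, 0.0])
_cost_check = (-_y_check * np.log(_h_check) - (1 - _y_check) * np.log(1 - _h_check)).mean()
assert _cost_check > 0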
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a__ : Dict = logging.get_logger(__name__)
a__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a__ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a__ : Optional[int] = {
'''allenai/led-base-16384''': 16_384,
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Union[str, Any] = LEDTokenizer
__SCREAMING_SNAKE_CASE : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __lowerCAmelCase ( self ) ->str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value
SCREAMING_SNAKE_CASE : List[Any] = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def UpperCAmelCase_( a__ , a__ , a__=1_024 , a__=1_024 , a__=False , **a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(a__ )
SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''train''' , **a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = tok.pad_token_id
def get_lens(a__ ):
SCREAMING_SNAKE_CASE : Optional[int] = tqdm(
DataLoader(a__ , batch_size=512 , num_workers=8 , shuffle=a__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for batch in dl:
SCREAMING_SNAKE_CASE : Dict = batch['''input_ids'''].ne(a__ ).sum(1 ).tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = batch['''labels'''].ne(a__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(a__ , a__ ):
max_lens.append(max(a__ , a__ ) )
else:
max_lens.extend(a__ )
return max_lens
SCREAMING_SNAKE_CASE : Optional[Any] = get_lens(a__ )
SCREAMING_SNAKE_CASE : int = SeqaSeqDataset(a__ , a__ , a__ , a__ , type_path='''val''' , **a__ )
SCREAMING_SNAKE_CASE : List[Any] = get_lens(a__ )
pickle_save(a__ , train_ds.len_file )
pickle_save(a__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(a__ ) for line in matrix ) )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ ) == (2, 2):
return default_matrix_multiplication(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(a__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]:
SCREAMING_SNAKE_CASE : Any = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a__ )
SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ )
SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = matrixa
SCREAMING_SNAKE_CASE : Tuple = matrixa
    # Pad the matrices with zeros so that both have the same dimensions, which
    # are also a power of 2 (required for the recursive splitting)
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ )
# Removing the additional zeros
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
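
# Hedged sanity check of Strassen's seven-product identity on a 2x2 example,
# compared against the naive product (values chosen for illustration).
def _strassen_2x2_check() -> None:
    a = [[1, 2], [3, 4]]
    b = [[5, 6], [7, 8]]
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    c = [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]
    naive = [
        [sum(a[i][k] * b[k][j] for k in range(2)) for j in range(2)]
        for i in range(2)
    ]
    assert c == naive


_strassen_2x2_check()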
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = credit_card_number
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Dict = len(a__ ) - 2
for i in range(a__ , -1 , -2 ):
# double the value of every second digit
SCREAMING_SNAKE_CASE : List[Any] = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
SCREAMING_SNAKE_CASE : Dict = cc_number[:i] + str(a__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(a__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(F"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(a__ ) <= 16:
print(F"""{error_message} of its length.""" )
return False
if not validate_initial_digits(a__ ):
print(F"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(a__ ):
print(F"""{error_message} it fails the Luhn check.""" )
return False
print(F"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
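
# Hedged worked example: a compact reference implementation of the Luhn
# checksum, used to double-check the step-by-step version above. Note that
# subtracting 9 from a doubled digit greater than 9 is equivalent to summing
# its two digits (e.g. 12 -> 1 + 2 = 3 = 12 - 9).
def _mini_luhn(number: str) -> bool:
    total = 0
    for idx, ch in enumerate(reversed(number)):
        digit = int(ch)
        if idx % 2 == 1:  # every second digit from the right is doubled
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0


assert _mini_luhn("4111111111111111")  # the classic test number passes
assert not _mini_luhn("4111111111111112")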
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = False ) ->Any:
SCREAMING_SNAKE_CASE : str = scheduler
SCREAMING_SNAKE_CASE : List[str] = optimizers if isinstance(_lowerCamelCase , (list, tuple) ) else [optimizers]
SCREAMING_SNAKE_CASE : Union[str, Any] = split_batches
SCREAMING_SNAKE_CASE : List[Any] = step_with_optimizer
SCREAMING_SNAKE_CASE : List[str] = GradientState()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
SCREAMING_SNAKE_CASE : List[str] = AcceleratorState().num_processes
for _ in range(_lowerCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
else:
self.scheduler.step(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self ) ->List[str]:
return self.scheduler.state_dict()
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
self.scheduler.load_state_dict(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
return self.scheduler.get_lr()
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->List[str]:
return self.scheduler.print_lr(*_lowerCamelCase , **_lowerCamelCase )
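
# Hedged illustration of the stepping rule above: with split_batches the
# dataloader batch size is unchanged (one scheduler step per training step);
# otherwise it was multiplied by num_processes, so the schedule must advance
# that many times per step to stay aligned with the number of samples seen.
def _expected_scheduler_steps(training_steps, num_processes, split_batches):
    return training_steps * (1 if split_batches else num_processes)


assert _expected_scheduler_steps(10, 4, split_batches=False) == 40
assert _expected_scheduler_steps(10, 4, split_batches=True) == 10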
from __future__ import annotations
from typing import Any
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase = 6 ) ->None:
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
self.create_linked_list(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : List[str] = Node()
SCREAMING_SNAKE_CASE : Optional[int] = current_node
SCREAMING_SNAKE_CASE : List[str] = current_node
SCREAMING_SNAKE_CASE : List[str] = current_node
for _ in range(1 , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = Node()
SCREAMING_SNAKE_CASE : Dict = current_node
SCREAMING_SNAKE_CASE : Optional[Any] = previous_node
SCREAMING_SNAKE_CASE : List[Any] = current_node
SCREAMING_SNAKE_CASE : Any = self.front
SCREAMING_SNAKE_CASE : Union[str, Any] = previous_node
def __lowerCAmelCase ( self ) ->bool:
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __lowerCAmelCase ( self ) ->Any | None:
self.check_can_perform_operation()
return self.front.data if self.front else None
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
SCREAMING_SNAKE_CASE : Any = self.rear.next
if self.rear:
SCREAMING_SNAKE_CASE : int = data
def __lowerCAmelCase ( self ) ->Any:
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.front.data
SCREAMING_SNAKE_CASE : Optional[Any] = None
return data
SCREAMING_SNAKE_CASE : Tuple = self.front
SCREAMING_SNAKE_CASE : int = old_front.next
SCREAMING_SNAKE_CASE : Any = old_front.data
SCREAMING_SNAKE_CASE : str = None
return data
def __lowerCAmelCase ( self ) ->None:
if self.is_empty():
raise Exception('''Empty Queue''' )
def __lowerCAmelCase ( self ) ->None:
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
class a_ :
"""simple docstring"""
def __init__( self ) ->None:
SCREAMING_SNAKE_CASE : Any | None = None
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
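# Illustrative, standalone sketch (it does not call the class above): the
# queue is empty exactly when front and rear point at the same node whose
# data is None. A minimal two-node ring shows the enqueue bookkeeping with
# readable names.
class _RingNode:
    def __init__(self):
        self.data = None
        self.next = None
        self.prev = None

_a, _b = _RingNode(), _RingNode()
_a.next = _a.prev = _b
_b.next = _b.prev = _a
_front = _rear = _a
assert _front is _rear and _front.data is None  # empty
_rear.data = "x"                 # enqueue on empty: rear stays put
assert _front is _rear and _front.data == "x"
_rear = _rear.next               # enqueue again: advance rear ...
_rear.data = "y"                 # ... then store the payload
assert _front is not _rear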
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE : Union[str, Any] = k.replace(pegasus_name , hf_name )
return k
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**a__ )
SCREAMING_SNAKE_CASE : Optional[int] = PegasusForConditionalGeneration(a__ )
SCREAMING_SNAKE_CASE : Dict = torch_model.model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = {}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE : int = rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE : Dict = v.T
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE : Tuple = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE : int = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Union[str, Any] = mapping['''shared.weight''']
SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = torch_model.model.load_state_dict(a__ , strict=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = Path(a__ ).parent.name
SCREAMING_SNAKE_CASE : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
SCREAMING_SNAKE_CASE : Dict = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(a__ )
SCREAMING_SNAKE_CASE : List[str] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
SCREAMING_SNAKE_CASE : int = task_specific_params
SCREAMING_SNAKE_CASE : List[str] = convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(a__ , Path(a__ ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
a__ : List[str] = parser.parse_args()
if args.save_dir is None:
a__ : Any = Path(args.tf_ckpt_path).parent.name
a__ : int = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
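# Illustrative sketch: the converter above renames TF checkpoint keys by
# applying the (old, new) pairs of the patterns list in order. A quick
# standalone check of that substitution chain, using a subset of the real
# patterns:
_demo_patterns = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["kernel", "weight"],
]

def _demo_rename(k):
    for old, new in _demo_patterns:
        k = k.replace(old, new)
    return k

assert _demo_rename("decoder/memory_attention/kernel") == "decoder.encoder_attn.weight"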
import pytest
import datasets
# Import fixture modules as plugins
a__ : Tuple = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=a__ )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.getbasetemp() / '''cache'''
SCREAMING_SNAKE_CASE : int = test_hf_cache_home / '''datasets'''
SCREAMING_SNAKE_CASE : List[str] = test_hf_cache_home / '''metrics'''
SCREAMING_SNAKE_CASE : Union[str, Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(a__ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(a__ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(a__ ) )
SCREAMING_SNAKE_CASE : int = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(a__ ) )
SCREAMING_SNAKE_CASE : Dict = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(a__ ) )
@pytest.fixture(autouse=a__ , scope='''session''' )
def UpperCAmelCase_( ):
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , a__ )
@pytest.fixture
def UpperCAmelCase_( a__ ):
"""simple docstring"""
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , a__ )
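# Illustrative sketch: the autouse fixtures above isolate the HF caches by
# pointing `datasets.config` attributes at per-session temp directories via
# `monkeypatch.setattr`, which restores the originals after each test. The
# same pattern in miniature (the attribute path is the one used above):
import pytest

@pytest.fixture(autouse=True)
def _isolated_datasets_cache(tmp_path, monkeypatch):
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(tmp_path / "datasets"))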
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
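# Illustrative sketch: the fast test above checks only a 3x3 corner of the
# last channel against hard-coded values, which keeps the assertion cheap
# while still catching numerical drift. The slicing in isolation:
import numpy as np

_img = np.zeros((1, 64, 64, 3))
_corner = _img[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
assert _corner.shape == (3, 3)
assert np.abs(_corner.flatten() - np.zeros(9)).max() < 1e-2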
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class a_ :
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'The output directory where the model will be written.'} , )
__SCREAMING_SNAKE_CASE : str = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : str = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=a__ , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = HfArgumentParser((ModelArguments,) )
((SCREAMING_SNAKE_CASE) , ) : Tuple = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Dict = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=a__ , decoder_config=a__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
SCREAMING_SNAKE_CASE : Tuple = decoder_config.decoder_start_token_id
SCREAMING_SNAKE_CASE : Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
SCREAMING_SNAKE_CASE : int = decoder_config.bos_token_id
if pad_token_id is None:
SCREAMING_SNAKE_CASE : List[Any] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
SCREAMING_SNAKE_CASE : Tuple = decoder_config.eos_token_id
SCREAMING_SNAKE_CASE : Dict = decoder_start_token_id
SCREAMING_SNAKE_CASE : str = pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
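# Example invocation (illustrative; the flag names are inferred from the
# `model_args.*` attributes used above, since the dataclass field names are
# mangled in this copy, and the checkpoints are only examples):
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2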
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase_( a__ , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def UpperCAmelCase_( a__ , a__ , a__=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE : Any = ''''''
else:
SCREAMING_SNAKE_CASE : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = val
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ViTMSNConfig()
SCREAMING_SNAKE_CASE : Optional[int] = 1_000
SCREAMING_SNAKE_CASE : str = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE : List[str] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(a__ , a__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE : List[Any] = {int(a__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : str = idalabel
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMSNModel(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(a__ , map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE : Any = ViTImageProcessor(size=config.image_size )
remove_projection_head(a__ )
SCREAMING_SNAKE_CASE : Any = create_rename_keys(a__ , base_model=a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
read_in_q_k_v(a__ , a__ , base_model=a__ )
model.load_state_dict(a__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(a__ , stream=a__ ).raw )
SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
SCREAMING_SNAKE_CASE : int = image_processor(images=a__ , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE : Tuple = model(**a__ )
SCREAMING_SNAKE_CASE : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
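# Illustrative sketch: the q/k/v read-in above splits timm's fused qkv
# projection of shape (3 * hidden, hidden) into three (hidden, hidden)
# blocks, in query/key/value order. The same slicing on a toy tensor:
import torch

_hidden = 4
_in_proj = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q = _in_proj[:_hidden, :]
_k = _in_proj[_hidden : _hidden * 2, :]
_v = _in_proj[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _in_proj)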
from __future__ import annotations
import numpy as np
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = np.shape(a__ )
if rows != columns:
SCREAMING_SNAKE_CASE : int = (
'''\'table\' has to be of square shaped array but got a '''
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE : int = np.zeros((rows, columns) )
for i in range(a__ ):
for j in range(a__ ):
SCREAMING_SNAKE_CASE : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(a__ ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = (table[i][j] - total) / upper[j][j]
SCREAMING_SNAKE_CASE : Optional[Any] = 1
for j in range(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
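# Illustrative sketch with readable names (the routine above computes the
# same Doolittle factorization, where `lower` carries the unit diagonal):
import numpy as np

def _doolittle_lu(a):
    n = a.shape[0]
    lower, upper = np.zeros((n, n)), np.zeros((n, n))
    for i in range(n):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            lower[i][j] = (a[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, n):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = a[i][j] - total
    return lower, upper

_m = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
_l, _u = _doolittle_lu(_m)
assert np.allclose(_l @ _u, _m)  # A == L @ U (no pivoting, like the original)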
import csv
import tweepy
# Twitter API credentials
a__ : Union[str, Any] = ''''''
a__ : List[str] = ''''''
a__ : Any = ''''''
a__ : List[str] = ''''''
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = tweepy.OAuthHandler(a__ , a__ )
auth.set_access_token(a__ , a__ )
SCREAMING_SNAKE_CASE : List[str] = tweepy.API(a__ )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE : Any = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE : List[Any] = api.user_timeline(screen_name=a__ , count=200 )
# save most recent tweets
alltweets.extend(a__ )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(a__ ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE : Any = api.user_timeline(
screen_name=a__ , count=200 , max_id=oldest )
# save most recent tweets
alltweets.extend(a__ )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE : Dict = alltweets[-1].id - 1
print(F"""...{len(a__ )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = csv.writer(a__ )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(a__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
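# Illustrative sketch: the loop above pages backwards through a timeline by
# anchoring each request at max_id = (oldest id seen) - 1, so no tweet is
# fetched twice. The same cursor pattern against a fake, newest-first feed:
_feed = list(range(100, 0, -1))

def _fake_timeline(count, max_id=None):
    eligible = [i for i in _feed if max_id is None or i <= max_id]
    return eligible[:count]

_collected = []
_batch = _fake_timeline(count=30)
while _batch:
    _collected.extend(_batch)
    _batch = _fake_timeline(count=30, max_id=_collected[-1] - 1)
assert _collected == _feed  # every id exactly once, in order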
import os
from collections.abc import Iterator
def UpperCAmelCase_( a__ = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(a__ ):
SCREAMING_SNAKE_CASE : Dict = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(a__ )[1] in (".py", ".ipynb"):
yield os.path.join(a__ , a__ ).lstrip('''./''' )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return F"""{i * "  "}*""" if i else "\n##"
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(a__ )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def UpperCAmelCase_( a__ = "." ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ''''''
for filepath in sorted(good_file_paths(a__ ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = os.path.split(a__ )
if filepath != old_path:
SCREAMING_SNAKE_CASE : Tuple = print_path(a__ , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (filepath.count(os.sep ) + 1) if filepath else 0
SCREAMING_SNAKE_CASE : Dict = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"""{md_prefix(a__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
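# Illustrative sample of the generated index (paths are hypothetical):
#
#   ## Sorts
#     * [Bubble Sort](sorts/bubble_sort.py)
#     * [Quick Sort](sorts/quick_sort.py)
#
# The prefix helper yields "\n##" for a new top-level section and an
# indented "*" bullet for every deeper level.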
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'ibert'
def __init__( self , _lowerCamelCase=3_0522 , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ) ->Any:
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : str = position_embedding_type
SCREAMING_SNAKE_CASE : Optional[int] = quant_mode
SCREAMING_SNAKE_CASE : Dict = force_dequant
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a__ : str = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
a__ : List[str] = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
a__ : List[str] = '''▁'''
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file
SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.sp_model ) - 1
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) ->Dict:
return len(self.sp_model )
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Dict = self.sp_model.PieceToId(_lowerCamelCase )
return spm_id if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : Dict = ''''''
SCREAMING_SNAKE_CASE : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCamelCase ) + token
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = []
else:
current_sub_tokens.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = False
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __getstate__( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Tuple = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : int = {}
SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
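# Illustrative sketch: the inputs built above follow the RoBERTa layout --
# `<s> A </s>` for one sequence and `<s> A </s></s> B </s>` for a pair.
# With the fairseq ids hard-coded above (<s> = 0, </s> = 2):
_cls, _sep = [0], [2]
_a_ids, _b_ids = [11, 12], [21]
assert _cls + _a_ids + _sep == [0, 11, 12, 2]
assert _cls + _a_ids + _sep + _sep + _b_ids + _sep == [0, 11, 12, 2, 2, 21, 2]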
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = 'imagegpt'
__SCREAMING_SNAKE_CASE : Optional[Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _lowerCamelCase=512 + 1 , _lowerCamelCase=32 * 32 , _lowerCamelCase=512 , _lowerCamelCase=24 , _lowerCamelCase=8 , _lowerCamelCase=None , _lowerCamelCase="quick_gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.0_2 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , **_lowerCamelCase , ) ->str:
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = n_positions
SCREAMING_SNAKE_CASE : Optional[int] = n_embd
SCREAMING_SNAKE_CASE : List[Any] = n_layer
SCREAMING_SNAKE_CASE : List[Any] = n_head
SCREAMING_SNAKE_CASE : int = n_inner
SCREAMING_SNAKE_CASE : Dict = activation_function
SCREAMING_SNAKE_CASE : Union[str, Any] = resid_pdrop
SCREAMING_SNAKE_CASE : Dict = embd_pdrop
SCREAMING_SNAKE_CASE : List[str] = attn_pdrop
SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = scale_attn_weights
SCREAMING_SNAKE_CASE : Optional[int] = use_cache
SCREAMING_SNAKE_CASE : Optional[Any] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class a_ ( a__ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = 3 , _lowerCamelCase = 32 , _lowerCamelCase = 32 , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : dict[int, int] = {}
SCREAMING_SNAKE_CASE : List[Any] = 2
while True:
SCREAMING_SNAKE_CASE : Tuple = factor_map.pop(prime , None )
if factor:
SCREAMING_SNAKE_CASE : Optional[Any] = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE : str = factor
else:
SCREAMING_SNAKE_CASE : Optional[int] = prime
yield prime
prime += 1
def UpperCAmelCase_( a__ = 1e10 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = sieve()
SCREAMING_SNAKE_CASE : List[Any] = 1
while True:
SCREAMING_SNAKE_CASE : Tuple = next(a__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(a__ )
n += 2
if __name__ == "__main__":
print(solution())
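# Illustrative sketch with readable names: the generator above is an
# incremental sieve. Composites live in a map keyed by the next value they
# will "hit"; a candidate absent from the map is prime, and its square is
# registered as the first composite it produces.
def _incremental_sieve():
    factor_map = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            x = candidate + factor
            while x in factor_map:  # slide to a free slot for this factor
                x += factor
            factor_map[x] = factor
        else:
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

_gen = _incremental_sieve()
assert [next(_gen) for _ in range(5)] == [2, 3, 5, 7, 11]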
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a__ )
if is_prime(a__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = len(a__ )
for _ in range(a__ ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
a__ : Any = list(range(10, 0, -1))
print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
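# Added note (illustrative): odd-even transposition sort alternates two
# phases -- compare/swap pairs starting at even indices, then pairs
# starting at odd indices. Each phase touches disjoint pairs (which is why
# the algorithm parallelizes well), and n alternating phases sort n items:
# [3, 1, 2] -> even phase -> [1, 3, 2] -> odd phase -> [1, 2, 3]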
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
__SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return np.dot(a__ , a__ )
class a_ :
"""simple docstring"""
def __init__( self , *,
_lowerCamelCase = np.inf , _lowerCamelCase = "linear" , _lowerCamelCase = 0.0 , ) ->None:
SCREAMING_SNAKE_CASE : Any = regularization
SCREAMING_SNAKE_CASE : str = gamma
if kernel == "linear":
SCREAMING_SNAKE_CASE : List[Any] = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
SCREAMING_SNAKE_CASE : Tuple = F"""Unknown kernel: {kernel}"""
raise ValueError(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->float:
return np.dot(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->float:
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : List[str] = observations
SCREAMING_SNAKE_CASE : str = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((SCREAMING_SNAKE_CASE) , ) : str = np.shape(_lowerCamelCase )
def to_minimize(_lowerCamelCase ) -> float:
SCREAMING_SNAKE_CASE : Any = 0
((SCREAMING_SNAKE_CASE) , ) : List[str] = np.shape(_lowerCamelCase )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = LinearConstraint(_lowerCamelCase , 0 , 0 )
SCREAMING_SNAKE_CASE : Optional[Any] = Bounds(0 , self.regularization )
SCREAMING_SNAKE_CASE : int = minimize(
_lowerCamelCase , np.ones(_lowerCamelCase ) , bounds=_lowerCamelCase , constraints=[ly_contraint] ).x
SCREAMING_SNAKE_CASE : List[Any] = l_star
# calculating mean offset of separation plane to points
        s = 0
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
        self.offset = s / n
    def predict( self , observation ) ->int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
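To see the Wolfe-dual machinery above end to end, here is a self-contained toy run of the same optimization: a hard-margin linear SVM on four separable points, solved with the identical minimize/LinearConstraint/Bounds setup. The data and variable names below are illustrative only, not part of the class above.

import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

# Two linearly separable classes labelled +1 / -1.
xs = np.array([[1.0, 1.0], [2.0, 2.0], [-1.0, -1.0], [-2.0, -2.0]])
ys = np.array([1.0, 1.0, -1.0, -1.0])
n = len(ys)
gram = (ys[:, None] * ys[None, :]) * (xs @ xs.T)  # y_i * y_j * k(x_i, x_j)

def dual_objective(l):
    # 1/2 * sum_ij l_i l_j y_i y_j k(x_i, x_j) - sum_i l_i (to be minimized)
    return 0.5 * l @ gram @ l - l.sum()

l_star = minimize(
    dual_objective,
    np.ones(n),
    bounds=Bounds(0, np.inf),                  # 0 <= l_i (hard margin)
    constraints=[LinearConstraint(ys, 0, 0)],  # sum_i l_i y_i = 0
).x

def w_dot(x):
    # w . x expressed through the kernel, w = sum_j l_j y_j x_j
    return sum(l_star[j] * ys[j] * np.dot(xs[j], x) for j in range(n))

b = np.mean([ys[i] - w_dot(xs[i]) for i in range(n)])  # b ~= mean(y_n - w . x_n)
print(1 if w_dot(np.array([1.5, 1.5])) + b >= 0 else -1)  # -> 1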
| 313
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a__ : List[str] = '''CompVis/stable-diffusion-v1-1'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
a__ : Any = '''CompVis/stable-diffusion-v1-3'''
a__ : Optional[Any] = '''CompVis/stable-diffusion-v1-4'''
class StableDiffusionComparisonPipeline ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ) ->str:
        super().__init__()
SCREAMING_SNAKE_CASE : Tuple = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __lowerCAmelCase ( self ) ->Dict[str, Any]:
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def __lowerCAmelCase ( self , _lowerCamelCase = "auto" ) ->Optional[int]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[str]:
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->str:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Tuple:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Optional[Any]:
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE : Optional[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE : str = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
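A hedged usage sketch for this comparison pipeline: community pipelines like this one are typically loaded through `DiffusionPipeline.from_pretrained` with a `custom_pipeline` identifier. The identifier and the name of the combined method invoked at the end (likely `_compare` upstream) are assumptions here.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community-pipeline id
    torch_dtype=torch.float16,
).to("cuda")
pipe.enable_attention_slicing()

# Runs the same prompt through the v1.1-v1.4 checkpoints and returns four images.
result = pipe._compare(prompt="an astronaut riding a horse", num_inference_steps=25)  # method name assumed
for i, image in enumerate(result.images):
    image.save(f"sd_v1_{i + 1}.png")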
| 313
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
a__ : Optional[int] = True
from torch.cuda.amp import autocast
a__ : Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments :
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={'help': 'Whether to log verbose messages or not.'} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.9_9_9_9_9_5 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger( model_args , training_args ):
    """simple docstring"""
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
    dataset_name: str = field(
        default=None , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name: Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    validation_split_name: Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_duration_in_seconds: Optional[float] = field(
        default=2_0.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWavaVecaPretraining :
    """simple docstring"""
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self , features ) ->Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
        batch_size = batch['''input_values'''].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(batch_size , device=attention_mask.device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch['''mask_time_indices'''] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
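The flip-cumsum-flip idiom above is worth seeing in isolation: it converts a single 1 placed at each sequence's last valid index into a full prefix mask. A minimal standalone demonstration:

import torch

# One 1 at index (length - 1) per row, everything else 0 ...
output_lengths = torch.tensor([3, 5])
mask = torch.zeros((2, 6), dtype=torch.long)
mask[torch.arange(2), output_lengths - 1] = 1
# ... becomes True for every position up to and including that index.
mask = mask.flip([-1]).cumsum(-1).flip([-1]).bool()
print(mask)
# tensor([[ True,  True,  True, False, False, False],
#         [ True,  True,  True,  True,  True, False]])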
class WavaVecaPreTrainer ( Trainer ):
    """simple docstring"""
    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model , inputs ) ->torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''mask_time_indices''']).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def main( ):
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
# Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['''validation'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        datasets['''train'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets['''validation'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
        datasets['''train'''] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch['''speech'''] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
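As a quick illustration of the gumbel-softmax temperature schedule the trainer applies after every update step, here is the same formula run standalone with the dataclass defaults above (max 2.0, min 0.5, decay 0.999995):

max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

def gumbel_temperature(num_update_step: int) -> float:
    # Geometric decay per update step, floored at the minimum temperature.
    return max(max_gumbel_temp * gumbel_temp_decay**num_update_step, min_gumbel_temp)

for step in (0, 100_000, 300_000, 1_000_000):
    print(step, round(gumbel_temperature(step), 4))
# 0 -> 2.0, 100000 -> ~1.2131, 300000 -> 0.5 (floor reached), 1000000 -> 0.5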
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
"""simple docstring"""
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng ) ->FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.floataa )
        timesteps = jnp.ones((1,) , dtype=jnp.intaa )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(reversed_block_out_channels ) - 1 )]
            is_final_block = i == len(reversed_block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict = True , train = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.floataa )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
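The residual bookkeeping in steps 3 and 5 above can be seen with plain lists: the down path pushes residuals onto one stack and each up block slices the last `layers_per_block + 1` of them back off. The counts below are a simplified assumption (real down blocks push one extra residual per downsample), but the slicing logic is the same.

layers_per_block = 2
num_blocks = 4
# The down path pushes the initial sample plus residuals from every block.
down_block_res_samples = ["sample"] + [
    f"down{b}_res{l}" for b in range(num_blocks) for l in range(layers_per_block + 1)
]
for up_block in range(num_blocks):
    res_samples = down_block_res_samples[-(layers_per_block + 1):]
    down_block_res_samples = down_block_res_samples[: -(layers_per_block + 1)]
    print(f"up block {up_block} consumes {res_samples}")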
| 313
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : List[str] = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=5_0265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
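The `attribute_map` declared on this config routes generic attribute names to architecture-specific fields, which a quick check makes visible. This is a hedged sketch using the public transformers class that this definition corresponds to:

from transformers import BlenderbotSmallConfig

config = BlenderbotSmallConfig(d_model=512, encoder_attention_heads=16)
# attribute_map routes the generic names onto the model-specific fields:
print(config.hidden_size)          # 512 (-> d_model)
print(config.num_attention_heads)  # 16  (-> encoder_attention_heads)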
class BlenderbotSmallOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
@property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"""present.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) ->Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
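A hedged end-to-end sketch of how this ONNX config is meant to be used: build it over the model config and ask it for dummy inputs of a given shape. The import path is an assumption, and the `framework` keyword normally expects a `TensorType` (the string form is accepted by the tokenizer underneath).

from transformers import AutoConfig, AutoTokenizer
from transformers.models.blenderbot_small.configuration_blenderbot_small import (
    BlenderbotSmallOnnxConfig,  # assumed import path
)

model_config = AutoConfig.from_pretrained("facebook/blenderbot_small-90M")
tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")

onnx_config = BlenderbotSmallOnnxConfig(model_config, task="default")
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework="pt"
)
print(sorted(dummy_inputs.keys()))  # e.g. attention_mask, decoder_input_ids, input_ids, ...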
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImgaImgPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=_lowerCamelCase , use_linear_projection=_lowerCamelCase , )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({'''image_embeds''': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class StableUnCLIPImgaImgPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(input_image , '''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_h_img2img( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe(input_image , '''anime turle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            input_image , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
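The memory-budget assertion above follows a reusable pattern; isolated, it looks like this (CUDA required; the 7 GB budget is the one used by the test):

import torch

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()

# ... run the offloaded pipeline under test here ...

mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 7 * 10**9, f"peak allocation {mem_bytes / 1e9:.2f} GB exceeds 7 GB"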
| 313
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ibert'''] = [
'''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''IBertForMaskedLM''',
'''IBertForMultipleChoice''',
'''IBertForQuestionAnswering''',
'''IBertForSequenceClassification''',
'''IBertForTokenClassification''',
'''IBertModel''',
'''IBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
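The `_LazyModule` indirection above defers the heavy torch-backed imports until a symbol is first accessed. A minimal sketch of the mechanism (simplified; the real implementation also handles `dir()`, pickling, and module specs):

import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        submodule = self._symbol_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)  # the real import happens only now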
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint ( ABC ):
    """simple docstring"""
    def __init__( self ):
        # test for the above condition
        self.test()
    def test( self ):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 1_0000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )
    @abstractmethod
    def advance( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def does_advance( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def update( self , token_id ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def reset( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def remaining( self ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
    @abstractmethod
    def copy( self , stateful=False ):
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class PhrasalConstraint ( Constraint ):
    """simple docstring"""
    def __init__( self , token_ids ):
        super().__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
    def advance( self ):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance( self , token_id ):
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class DisjunctiveTrie:
    """simple docstring"""
    def __init__( self , nested_token_ids , no_subsets=True ) ->Dict:
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F""" {nested_token_ids}.""" )
        self.trie = root
    def next_tokens( self , current_seq ) ->int:
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens
    def reached_leaf( self , current_seq ) ->Dict:
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0
    def count_leaves( self , root ) ->Optional[Any]:
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
    def has_subsets( self , trie , nested_token_ids ) ->Dict:
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
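# For illustration (not part of the original class): DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
# has max_height 3; next_tokens([1, 2]) returns [3, 4], and the trie has 2 leaves,
# so neither branch is a complete subset of the other and no ValueError is raised.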
class DisjunctiveConstraint(Constraint ):
    """simple docstring"""
    def __init__( self , nested_token_ids ) ->str:
        # note: deliberately skips Constraint.__init__ (and thus self.test())
        super(Constraint , self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance( self ) ->int:
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def does_advance( self , token_id ) ->Dict:
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens
    def update( self , token_id ) ->Any:
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset
    def reset( self ) ->Optional[Any]:
        self.completed = False
        self.current_seq = []
    def remaining( self ) ->Optional[Any]:
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )
    def copy( self , stateful=False ) ->List[str]:
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """simple docstring"""
    def __init__( self , constraints ) ->Union[str, Any]:
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()
    def init_state( self ) ->int:
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]
    def get_bank( self ) ->str:
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add
    def advance( self ) ->Optional[int]:
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list
    def reset( self , token_ids ) ->Union[str, Any]:
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add( self , token_id ) ->List[Any]:
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy( self , stateful=True ) ->str:
        new_state = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
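# A minimal usage sketch (added for illustration; the token ids are arbitrary):
# stepping a PhrasalConstraint through its phrase one token at a time.
if __name__ == "__main__":
    constraint = PhrasalConstraint([5, 6, 7] )
    for token in [5, 6, 7]:
        stepped, completed, reset = constraint.update(token )
        print(token , stepped , completed , reset )
    print(constraint.remaining() )  # 0 once the full phrase [5, 6, 7] has been consumed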
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
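# Illustrative invocation (the script filename is hypothetical):
#     python google_search.py "python list comprehension"
# opens the top Google result for the query in the default browser.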
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    """simple docstring"""
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1_026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    # load pretrained model
    model = load_gpt2('''gpt2''' ).to(device )
    print('''computing perplexity on objective set''' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('''perplexity on objective set:''' , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    """simple docstring"""
    set_seed(42 )
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    """simple docstring"""
    device = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print('''Test perplexity, step''' , global_step , ''':''' , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
"""simple docstring"""
    parser = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain data files for WikiText.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--data_file''' , type=str , default=None , help=(
            '''A jbl file containing tokenized data which can be split as objective dataset, '''
            '''train_dataset and test_dataset.'''
        ) , )
    parser.add_argument(
        '''--igf_data_file''' , type=str , default=None , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the final fine-tuned model is stored.''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default=None , type=str , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
    parser.add_argument('''--seed''' , type=int , default=None , help='''A seed for reproducible training.''' )
    parser.add_argument(
        '''--context_len''' , default=32 , type=int , help=(
            '''The maximum total input sequence length after tokenization. Sequences longer '''
            '''than this will be truncated, sequences shorter will be padded.'''
        ) , )
    parser.add_argument(
        '''--size_objective_set''' , default=100 , type=int , help='''number of articles that are long enough to be used as our objective set''' , )
    parser.add_argument(
        '''--eval_freq''' , default=100 , type=int , help='''secondary model evaluation is triggered at eval_freq''' )
    parser.add_argument('''--max_steps''' , default=1_000 , type=int , help='''To calculate training epochs''' )
    parser.add_argument(
        '''--secondary_learner_batch_size''' , default=128 , type=int , help='''batch size of training data for secondary learner''' , )
    parser.add_argument(
        '''--batch_size''' , default=16 , type=int , help='''batch size of training data of language model(gpt2) ''' )
    parser.add_argument(
        '''--eval_interval''' , default=10 , type=int , help=(
            '''decay the selectivity of our secondary learner filter from'''
            '''1 standard deviation above average to 1 below average after 10 batches'''
        ) , )
    parser.add_argument(
        '''--number''' , default=100 , type=int , help='''The number of examples split to be used as objective_set/test_data''' )
    parser.add_argument(
        '''--min_len''' , default=1_026 , type=int , help='''The minimum length of the article to be used as objective set''' )
    parser.add_argument(
        '''--secondary_learner_max_epochs''' , default=15 , type=int , help='''number of epochs to train secondary learner''' )
    parser.add_argument('''--trim''' , default=True , type=bool , help='''truncate the example if it exceeds context length''' )
    parser.add_argument(
        '''--threshold''' , default=1.0 , type=float , help=(
            '''The threshold value used by secondary learner to filter the train_data and allow only'''
            ''' informative data as input to the model'''
        ) , )
    parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=str , help='''finetuned_model_name''' )
    parser.add_argument(
        '''--recopy_model''' , default=recopy_gpt2 , type=str , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
    secondary_learner_train_data = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=True )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
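# Illustrative run (the script filename is hypothetical; assumes the data files
# referenced by the defaults above exist):
#     python igf_finetune.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./igf_output
# main() then collects (context, IG(X)) pairs, trains the secondary learner, and
# fine-tunes GPT-2 with the learned filter.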
from math import pow, sqrt
def validate( *values ):
    """simple docstring"""
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_a , molar_mass_b ):
    """simple docstring"""
    return (
        round(sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(molar_mass_a , molar_mass_b )
        else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
    )
def first_effusion_rate( effusion_rate , molar_mass_a , molar_mass_b ):
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_effusion_rate( effusion_rate , molar_mass_a , molar_mass_b ):
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def first_molar_mass( molar_mass , effusion_rate_a , effusion_rate_b ):
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
def second_molar_mass( molar_mass , effusion_rate_a , effusion_rate_b ):
    """simple docstring"""
    return (
        round(pow(effusion_rate_a / effusion_rate_b , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            '''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
    )
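# A small usage sketch (illustrative values): by Graham's law the effusion-rate
# ratio equals sqrt(M_2 / M_1), so comparing O2 (~31.998 g/mol) with H2 (~2.016 g/mol):
if __name__ == "__main__":
    print(effusion_ratio(31.998 , 2.016 ))  # ~3.984, i.e. H2 effuses roughly 4x faster than O2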
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    """simple docstring"""
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    elif metric == "loss":
        exp = '''{val_avg_loss:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    """simple docstring"""
    return EarlyStopping(
        monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback ):
    """simple docstring"""
    def on_batch_end( self , trainer , pl_module ) ->Dict:
        lrs = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ) ->None:
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / '''test_results.txt'''
            generations_file = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , '''a+''' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ) ->Dict:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end( self , trainer , pl_module ) ->List[Any]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , '''test''' )
    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ) ->Tuple:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
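# A minimal wiring sketch (illustrative; assumes a LightningModule exposing
# `hparams.output_dir`, `metrics`, and `metrics_save_path` as used above):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="checkpoints", metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ],
#     )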
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
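# The module is assembled lazily: the submodules listed in `_import_structure`
# are only imported on first attribute access, via the `_LazyModule` proxy.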
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def is_chinese( word ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = ''''''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '''##''' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
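# Worked example (illustrative): with bert_tokens ['中', '国', '人'] and
# chinese_word_set {'中国'}, add_sub_symbol returns ['中', '##国', '人'],
# marking the continuation piece of the whole word with the '##' prefix.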
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    """simple docstring"""
    with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp ) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref ) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args)
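# Illustrative CLI run (the script filename is hypothetical; the paths are the
# argparse defaults declared above):
#     python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt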
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp( self ) ->Any:
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ) ->Optional[Any]:
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ) ->int:
        input_text, output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def __lowerCAmelCase ( self ) ->Tuple:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->Union[str, Any]:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->Tuple:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_lowerCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_lowerCamelCase , '''wb''' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : Union[str, Any] = pickle.load(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Any = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self ) ->Optional[Any]:
try:
SCREAMING_SNAKE_CASE : Optional[int] = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self ) ->Any:
try:
SCREAMING_SNAKE_CASE : str = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Tuple = MecabTokenizer(do_lower_case=_lowerCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __lowerCAmelCase ( self ) ->Optional[Any]:
try:
SCREAMING_SNAKE_CASE : Tuple = MecabTokenizer(
do_lower_case=_lowerCamelCase , normalize_text=_lowerCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = MecabTokenizer(normalize_text=_lowerCamelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : str = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_lowerCamelCase , '''wb''' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : int = pickle.load(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@require_sudachi
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : int = SudachiTokenizer(do_lower_case=_lowerCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[str] = SudachiTokenizer(normalize_text=_lowerCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = SudachiTokenizer(trim_whitespace=_lowerCamelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_lowerCamelCase , '''wb''' ) as handle:
pickle.dump(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , '''rb''' ) as handle:
SCREAMING_SNAKE_CASE : List[Any] = pickle.load(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_new.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
@require_jumanpp
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = JumanppTokenizer(do_lower_case=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[str] = JumanppTokenizer(normalize_text=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : int = JumanppTokenizer(trim_whitespace=_lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
SCREAMING_SNAKE_CASE : List[Any] = {}
for i, token in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Dict = i
SCREAMING_SNAKE_CASE : Any = WordpieceTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : List[Any] = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE : Any = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_lowerCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
SCREAMING_SNAKE_CASE : Union[str, Any] = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_lowerCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode('''ありがとう。''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp( self ) ->List[Any]:
        super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ) ->Any:
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **kwargs )
    def get_input_output_texts( self , tokenizer ) ->Tuple:
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __lowerCAmelCase ( self ) ->int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_lowerCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
SCREAMING_SNAKE_CASE : List[str] = {}
for i, token in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = i
SCREAMING_SNAKE_CASE : Any = CharacterTokenizer(vocab=_lowerCamelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode('''ありがとう。''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_bert_japanese( self ) ->Any:
        EXAMPLE_BERT_JAPANESE_ID = '''cl-tohoku/bert-base-japanese'''
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class BertTokenizerMismatchTest(unittest.TestCase ):
    """simple docstring"""
    def test_tokenizer_mismatch_warning( self ) ->Union[str, Any]:
        EXAMPLE_ID = '''cl-tohoku/bert-base-japanese'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertTokenizer.from_pretrained(EXAMPLE_ID )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
        EXAMPLE_ID = '''bert-base-cased'''
        with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_ID )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
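# To run these tests (the path is an assumption about the repository layout):
#     python -m pytest tests/test_tokenization_bert_japanese.py -k mecab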
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ):
    """simple docstring"""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
def ffmpeg_microphone( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    """simple docstring"""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    system = platform.system()
    if system == "Linux":
        format_ = '''alsa'''
        input_ = '''default'''
    elif system == "Darwin":
        format_ = '''avfoundation'''
        input_ = ''':0'''
    elif system == "Windows":
        format_ = '''dshow'''
        input_ = '''default'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-f''',
        format_,
        '''-i''',
        input_,
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-fflags''',
        '''nobuffer''',
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    """simple docstring"""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['''raw'''] = np.frombuffer(item['''raw'''] , dtype=dtype )
        item['''stride'''] = (
            item['''stride'''][0] // size_of_sample,
            item['''stride'''][1] // size_of_sample,
        )
        item['''sampling_rate'''] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """Re-chunk a byte iterator into fixed-size chunks with (left, right) byte strides."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
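
# How the striding works on a toy stream: with chunk_len=4 and stride=(1, 1), each
# chunk overlaps its neighbours by one byte and the window advances by
# chunk_len - stride_left - stride_right = 2 bytes per chunk:
#
#     >>> list(chunk_bytes_iter(iter([b"abcdef"]), 4, stride=(1, 1)))
#     [{'raw': b'abcd', 'stride': (0, 1)},
#      {'raw': b'cdef', 'stride': (1, 1)},
#      {'raw': b'ef', 'stride': (1, 0)}]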
def _ffmpeg_stream(ffmpeg_command, buflen):
    """Internal helper: spawn ffmpeg and yield raw byte buffers of size `buflen` from its stdout."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
def fibonacci(n):
    """Return the n-th Fibonacci number, with fibonacci(1) == 0 and fibonacci(2) == 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n=1_000):
    """Project Euler 25: the index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)
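
# Sanity check via Binet's formula: F(i) ~ phi**i / sqrt(5), so F(i) has
# floor(i*log10(phi) - log10(5)/2) + 1 digits, and the first n-digit term sits at
# roughly ceil((n - 1 + log10(5)/2) / log10(phi)). A closed-form sketch
# (`solution_closed_form` is illustrative, not part of the original solution):
#
#     import math
#     def solution_closed_form(n: int = 1_000) -> int:
#         phi = (1 + math.sqrt(5)) / 2
#         return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))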
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
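
# With the `_LazyModule` indirection above, importing the package does not pull in
# torch; the heavy `modeling_wavlm` submodule is only imported on first attribute
# access. A sketch of the observable behaviour:
#
#     import transformers.models.wavlm as wavlm  # cheap: only the import structure is read
#     model_cls = wavlm.WavLMModel               # now the torch-backed submodule is imported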
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """Configuration class used to instantiate a Deformable DETR model."""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
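
# A usage sketch for the config above (values are illustrative, not recommended
# settings; note the two_stage/with_box_refine constraint enforced in __init__):
#
#     config = DeformableDetrConfig(two_stage=True, with_box_refine=True, num_queries=300)
#     assert config.hidden_size == config.d_model  # resolved via the `hidden_size` property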
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indent (leading whitespace) of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def UpperCAmelCase_( a__ , a__="" , a__=None , a__=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : int = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(a__ ):
index += 1
SCREAMING_SNAKE_CASE : str = ['''\n'''.join(lines[:index] )]
else:
SCREAMING_SNAKE_CASE : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
SCREAMING_SNAKE_CASE : Optional[Any] = [lines[index]]
index += 1
while index < len(a__ ) and (end_prompt is None or not lines[index].startswith(a__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(a__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(a__ ) )
if index < len(a__ ) - 1:
SCREAMING_SNAKE_CASE : List[str] = [lines[index + 1]]
index += 1
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
else:
blocks.append('''\n'''.join(a__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(a__ ) > 0:
blocks.append('''\n'''.join(a__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(a__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a key function so that sorting is case-insensitive and ignores underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
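
# A worked example of the ordering implemented above: constants first, then classes,
# then functions, each group alphabetized case-insensitively with underscores ignored:
#
#     >>> sort_objects(["my_func", "MyClass", "A_CONSTANT", "other_func"])
#     ['A_CONSTANT', 'MyClass', 'my_func', 'other_func']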
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement`, but with the objects inside it sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
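
# An illustrative before/after for the single-line case handled above:
#
#     >>> sort_objects_in_import('_import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_tf"]')
#     '_import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_tf"]'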
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under the transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
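
# Background sketch of the deterministic Euler step these loops exercise (the
# epsilon-prediction case with no churn; a simplified view of what
# `scheduler.step` computes in sigma space, stated as an assumption):
#
#     denoised = sample - sigma * model_output      # predicted x0
#     derivative = (sample - denoised) / sigma      # equals model_output here
#     prev_sample = sample + derivative * (sigma_next - sigma)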
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together until one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed until one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
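
# Worked examples: 39 -> 3*9=27 -> 2*7=14 -> 1*4=4 gives
# multiplicative_persistence(39) == 3, while the digit sums 39 -> 12 -> 3 give
# additive_persistence(39) == 2.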
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" LED tokenizer (backed by HuggingFace's tokenizers library), derived from the BART tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: str = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
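
# A usage sketch for the global attention handling above (`long_document` is a
# hypothetical input; 1 marks globally-attending tokens, 0 local-only, and the
# padding logic extends the mask with -1 so it stays aligned with `input_ids`):
#
#     tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tokenizer(long_document, return_tensors="pt")
#     enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
#     enc["global_attention_mask"][:, 0] = 1  # e.g. global attention on the <s> token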
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
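
# The idea behind `Tracker`, in short: hook every leaf module, run a single
# forward pass, and keep the leaves in execution order. A sketch (torchvision is
# an assumption for illustration; any nn.Module works):
#
#     traced = Tracker(torchvision.models.resnet18())(torch.randn(1, 3, 224, 224)).parametrized
#     # `traced` now lists the conv/bn/linear leaves in the order they ran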
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` into `self.dest` by performing a forward pass using `x` as input. Under
        the hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper for the vissl RegNet model, exposing the trunk outputs the way vissl does."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks)
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy specific (head) keys from the vissl state dict into our model's state dict."""
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True)

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True)

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
# pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        # the first keyword was duplicated as `w_a` in this copy; `w_0` matches RegNetParams
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
# IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
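# Illustrative invocation only (paths hypothetical; assumes this file is saved as
# convert_regnet_to_pytorch.py). "regnet-y-040" is one of the keys in `names_to_config`
# above; omit --model_name to convert every listed architecture.
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted_regnet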
| 313
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly; this is the recursion base case."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
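# Quick sanity check of the 2x2 base case (illustrative):
#   default_matrix_multiplication([[1, 2], [3, 4]], [[5, 6], [7, 8]])
# evaluates to [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]] == [[19, 22], [43, 50]].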
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally sized matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally sized matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into four equally sized quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (rows, columns) of a matrix."""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print a matrix one row per line."""
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices using Strassen's seven products."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
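# The seven recursive products above are the textbook Strassen terms: with block
# quadrants matrix_a = [[a, b], [c, d]] and matrix_b = [[e, f], [g, h]],
#   t1 = a(f - h)         t2 = (a + b)h         t3 = (c + d)e        t4 = d(g - e)
#   t5 = (a + d)(e + h)   t6 = (b - d)(g + h)   t7 = (a - c)(e + f)
# and the result quadrants are C11 = t5 + t4 - t2 + t6, C12 = t1 + t2,
# C21 = t3 + t4, C22 = t1 + t5 - t3 - t7. Using 7 multiplications instead of 8 per
# split yields the O(n^log2(7)) ~ O(n^2.81) running time.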
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible dimensions, zero-padding them to a power-of-two size."""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        # NOTE: kept as in the source; when both inputs are already square this
        # returns them unmultiplied, which looks like a latent upstream bug.
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # NOTE: these alias the inputs, so the zero-padding below mutates the caller's matrices.
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
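    # Illustrative cross-check of `strassen` against a naive O(n^3) multiply on a fresh
    # pair of matrices; `naive_multiply` and these matrices are local additions, not part
    # of the original script.
    def naive_multiply(x, y):
        return [
            [sum(x[i][k] * y[k][j] for k in range(len(y))) for j in range(len(y[0]))]
            for i in range(len(x))
        ]

    matrix_c = [[2, 1, 3], [3, 4, 6], [1, 4, 2], [7, 6, 7]]
    matrix_d = [[4, 2, 3, 4], [2, 1, 1, 1], [8, 6, 4, 2]]
    expected = naive_multiply(matrix_c, matrix_d)  # computed before strassen pads/mutates the inputs
    assert strassen(matrix_c, matrix_d) == expected
    print("strassen matches naive multiplication")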
| 313
| 1
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = BigBirdTokenizer
__SCREAMING_SNAKE_CASE : Any = BigBirdTokenizerFast
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : Optional[Any] = True
def __lowerCAmelCase ( self ) ->Union[str, Any]:
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) ->Tuple:
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def __lowerCAmelCase ( self ) ->Optional[int]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
def __lowerCAmelCase ( self ) ->List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __lowerCAmelCase ( self ) ->str:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Dict:
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self ) ->List[str]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : List[Any] = '''Hello World!'''
SCREAMING_SNAKE_CASE : Tuple = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
SCREAMING_SNAKE_CASE : int = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self ) ->Optional[Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
SCREAMING_SNAKE_CASE : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE : Dict = ''' '''.join(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = BigBirdConfig(attention_type='''original_full''' )
SCREAMING_SNAKE_CASE : List[Any] = BigBirdModel(_lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
SCREAMING_SNAKE_CASE : str = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def __lowerCAmelCase ( self ) ->Dict:
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
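# Illustrative sketch of the behaviour the tests above exercise (not part of the test
# suite; needs network access to fetch the pretrained vocabulary):
#
#   from transformers import BigBirdTokenizer
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Hello World!").input_ids   # [65, 18536, 2260, 101, 66] per the slow test above
#   text = tok.decode(ids)                # round-trips through the sentencepiece vocab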
| 313
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """
    A wrapper around a learning-rate scheduler that only steps when its optimizer(s)
    actually performed a training step, so skipped steps (e.g. under mixed precision or
    gradient accumulation) do not desynchronize the schedule.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough helpers that defer to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
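# Minimal usage sketch (illustrative, not part of the module): in practice this wrapper
# is created for you by `Accelerator.prepare`, but it can be constructed directly.
# `model` is assumed to exist.
#
#   from torch.optim import AdamW
#   from torch.optim.lr_scheduler import LambdaLR
#
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   scheduler = LambdaLR(optimizer, lr_lambda=lambda step: 0.95 ** step)
#   wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
#   wrapped.step()  # only advances when the wrapped optimizer actually stepped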
| 313
| 1
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Optional[int] = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : Any = is_training
SCREAMING_SNAKE_CASE : List[str] = use_attention_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = num_choices
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : Tuple = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxRoFormerModelTester(self )
@slow
def __lowerCAmelCase ( self ) ->List[str]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
SCREAMING_SNAKE_CASE : Any = jnp.array([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : int = model(_lowerCamelCase )[0]
SCREAMING_SNAKE_CASE : Optional[int] = 5_0000
SCREAMING_SNAKE_CASE : str = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
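# Illustrative inference sketch mirroring the integration test above (requires the
# pretrained weights to be downloadable):
#
#   import jax.numpy as jnp
#   from transformers import FlaxRoFormerForMaskedLM
#
#   model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#   logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]   # shape (1, 6, 50000)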
| 313
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
a__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    """Apply the PATTERNS substitutions to map a TF variable name onto an HF state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
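# Example of what the substitutions above do (illustrative TF variable name; real
# checkpoint keys also carry layer indices):
#   "encoder/memory_attention/output_proj/kernel"
#     -> "encoder.encoder_attn.out_proj.weight"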
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def UpperCAmelCase_( a__="./ckpt/aeslc/model.ckpt-32000" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tf.train.list_variables(a__ )
SCREAMING_SNAKE_CASE : str = {}
SCREAMING_SNAKE_CASE : List[Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(a__ , desc='''converting tf checkpoint to dict''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE : Dict = tf.train.load_variable(a__ , a__ )
SCREAMING_SNAKE_CASE : Any = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    """Convert a Pegasus TF checkpoint plus tokenizer and save them in PyTorch format."""
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
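# Illustrative invocation (paths hypothetical; assumes this file is saved as
# convert_pegasus_tf_to_pytorch.py). The dataset name is inferred from the checkpoint's
# parent directory, so "aeslc" below selects the summarization_aeslc params:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc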
| 313
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[str] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = embeddings_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
SCREAMING_SNAKE_CASE : Union[str, Any] = len(_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self ) ->Dict:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = FlaxRegNetModel(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = model(_lowerCamelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxRegNetForImageClassification(config=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : str = False
def __lowerCAmelCase ( self ) ->None:
SCREAMING_SNAKE_CASE : List[str] = FlaxRegNetModelTester(self )
SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) ->Dict:
return
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def __lowerCAmelCase ( self ) ->int:
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def __lowerCAmelCase ( self ) ->List[str]:
pass
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : str = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->str:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE : int = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = model_class(_lowerCamelCase )
@jax.jit
def model_jitted(_lowerCamelCase , **_lowerCamelCase ):
return model(pixel_values=_lowerCamelCase , **_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE : Any = model_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE : List[Any] = model_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self ) ->int:
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
SCREAMING_SNAKE_CASE : List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_lowerCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) )
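# Illustrative end-to-end inference mirroring the slow test above (downloads weights):
#
#   from transformers import AutoImageProcessor, FlaxRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=prepare_img(), return_tensors="np")
#   logits = model(**inputs).logits   # shape (1, 1000)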
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
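# Illustrative two-stage usage mirroring the integration test above (needs a GPU and
# weight downloads). The img2img class is KandinskyImg2ImgPipeline in diffusers (it is
# rendered as "KandinskyImgaImgPipeline" in this file); `init_image` is assumed to be a
# PIL image loaded beforehand.
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
#   frog = pipe("A red cartoon frog, 4k", image=init_image, image_embeds=image_embeds,
#               negative_image_embeds=negative_embeds, strength=0.2).images[0]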
| 313
| 1
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = 'MCTCTFeatureExtractor'
__SCREAMING_SNAKE_CASE : str = 'AutoTokenizer'
def __init__( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
super().__init__(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.feature_extractor
SCREAMING_SNAKE_CASE : Dict = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ) ->Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''raw_speech''' )
else:
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''audio''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''sampling_rate''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''text''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE : Dict = args[0]
SCREAMING_SNAKE_CASE : int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE : Any = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase )
if text is not None:
SCREAMING_SNAKE_CASE : int = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE : List[str] = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->Optional[Any]:
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''input_features''' , _lowerCamelCase )
SCREAMING_SNAKE_CASE : str = kwargs.pop('''labels''' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE : List[Any] = args[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if labels is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = labels['''input_ids''']
return input_features
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->int:
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def __lowerCAmelCase ( self ) ->Optional[Any]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer
yield
SCREAMING_SNAKE_CASE : Dict = self.feature_extractor
SCREAMING_SNAKE_CASE : Optional[Any] = False
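# Illustrative usage of the processor above (MCTCTProcessor in transformers); `raw_audio`
# and `transcript` are hypothetical variables:
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=raw_audio, sampling_rate=16000, text=transcript)
#   # input features come from the feature extractor, token labels from the tokenizer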
| 313
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """List (old, new) key pairs mapping MSN checkpoint names onto HF ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
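# Example of one pair produced above (illustrative; layer 0 with base_model=True, where
# the "vit." prefix is stripped for the base model):
#   ("module.blocks.0.norm1.weight", "encoder.layer.0.layernorm_before.weight")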
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
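# --- illustrative sketch (assumption: standalone toy sizes, not part of the original script) ---
# The split above relies on timm storing the attention projection as a single fused
# (3 * hidden, hidden) matrix with the query, key and value row-blocks stacked in that order:
def _demo_qkv_split():
    import torch

    hidden = 4
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
    assert torch.equal(torch.cat([q, k, v], dim=0), qkv)  # the three blocks reassemble exactly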
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    """simple docstring"""
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = 384
SCREAMING_SNAKE_CASE : Any = 1_536
SCREAMING_SNAKE_CASE : List[str] = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Optional[int] = 1_024
SCREAMING_SNAKE_CASE : Optional[int] = 4_096
SCREAMING_SNAKE_CASE : Tuple = 24
SCREAMING_SNAKE_CASE : Union[str, Any] = 16
SCREAMING_SNAKE_CASE : Dict = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
SCREAMING_SNAKE_CASE : Union[str, Any] = 1_024
SCREAMING_SNAKE_CASE : List[Any] = 4_096
SCREAMING_SNAKE_CASE : List[Any] = 24
SCREAMING_SNAKE_CASE : Tuple = 16
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''target_encoder''']
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE : str = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , a__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a__ : Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a__ : str = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['input_features']
    def __init__( self , feature_size=80 , sampling_rate=16_000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
    def _np_extract_fbank_features( self , waveform ) ->np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm( input_values , attention_mask , padding_value = 0.0 ) ->List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ) ->BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({'''input_features''': raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['''input_features'''] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ) ->Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
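# --- illustrative sketch (assumption: standalone toy values, not part of the class above) ---
# _np_extract_fbank_features compresses dynamic range by clamping every log-mel value to
# within 8.0 of the global peak and then rescaling to roughly [-1, 1]:
def _demo_log_mel_compression():
    import numpy as np

    log_spec = np.array([-12.0, -3.0, 0.5])
    clipped = np.maximum(log_spec, log_spec.max() - 8.0)  # floor tracks the peak: [-7.5, -3.0, 0.5]
    return (clipped + 4.0) / 4.0  # array([-0.875,  0.25 ,  1.125])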
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets( screen_name ):
    """simple docstring"""
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
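# --- illustrative sketch (assumption: toy integer ids stand in for tweets) ---
# The max_id cursor above pages backwards through a timeline: each request asks only for
# items older than the oldest one already seen, so no page repeats.
def _demo_paginate(ids, page_size):
    ids = sorted(ids, reverse=True)  # newest first, like a timeline
    cursor = None
    while True:
        page = [i for i in ids if cursor is None or i <= cursor][:page_size]
        if not page:
            return
        yield page
        cursor = page[-1] - 1  # "oldest id less one", as in get_all_tweets

# list(_demo_paginate(range(7), 3)) -> [[6, 5, 4], [3, 2, 1], [0]]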
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = '''CompVis/stable-diffusion-v1-1'''
pipe2_model_id = '''CompVis/stable-diffusion-v1-2'''
pipe3_model_id = '''CompVis/stable-diffusion-v1-3'''
pipe4_model_id = '''CompVis/stable-diffusion-v1-4'''
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers( self ) ->Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing( self , slice_size = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
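# --- illustrative sketch (assumption: toy callables stand in for the four checkpoint pipelines) ---
# The combined __call__ above fans one prompt out to every checkpoint and keeps the first
# image from each result; the control flow reduces to:
def _demo_fan_out(prompt, pipelines):
    results = [pipe(prompt) for pipe in pipelines]  # run every checkpoint
    return [res[0] for res in results]  # keep the first image of each

# _demo_fan_out("a robot", [lambda p: [p.upper()]] * 4) -> ['A ROBOT', 'A ROBOT', 'A ROBOT', 'A ROBOT']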
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : List[str] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'ibert'
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
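# --- illustrative sketch (assumption: standalone, not part of the config file above) ---
# The mapping returned by `inputs` marks which dimensions may vary at ONNX export time;
# for the default task both tensors share dynamic batch and sequence axes:
def _demo_onnx_input_spec(task="default"):
    from collections import OrderedDict

    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])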
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ):
        return F"""gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
def __lowerCAmelCase ( self , _lowerCamelCase=0 , _lowerCamelCase=(4, 4, 64, 64) , _lowerCamelCase=False ) ->str:
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Tuple = jnp.array(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) , dtype=_lowerCamelCase )
return image
def __lowerCAmelCase ( self , _lowerCamelCase=False , _lowerCamelCase="CompVis/stable-diffusion-v1-4" ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Any = '''bf16''' if fpaa else None
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = FlaxUNetaDConditionModel.from_pretrained(
_lowerCamelCase , subfolder='''unet''' , dtype=_lowerCamelCase , revision=_lowerCamelCase )
return model, params
def __lowerCAmelCase ( self , _lowerCamelCase=0 , _lowerCamelCase=(4, 77, 768) , _lowerCamelCase=False ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(_lowerCamelCase , _lowerCamelCase ) ) , dtype=_lowerCamelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = self.get_latents(_lowerCamelCase , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_encoder_hidden_states(_lowerCamelCase , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = model.apply(
{'''params''': params} , _lowerCamelCase , jnp.array(_lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCamelCase , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : List[str] = jnp.array(_lowerCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = self.get_latents(_lowerCamelCase , shape=(4, 4, 96, 96) , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(_lowerCamelCase , shape=(4, 77, 1024) , fpaa=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = model.apply(
{'''params''': params} , _lowerCamelCase , jnp.array(_lowerCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCamelCase , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE : str = jnp.array(_lowerCamelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
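# --- illustrative sketch (assumption: toy arrays, not part of the test class above) ---
# Both tests reduce to comparing a flattened output corner against a recorded slice
# within an absolute tolerance; the same check in miniature:
def _demo_allclose():
    import numpy as np

    sample = np.array([0.1514, 0.0807, 0.1624])
    expected = np.array([0.1514, 0.0807, 0.1625])  # differs by 1e-4
    return np.allclose(sample, expected, atol=1e-2)  # True: within tolerance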
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
a__ : Any = logging.get_logger(__name__)
a__ : Dict = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class ImageGPTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
    def generate_dummy_inputs( self , preprocessor , batch_size = 1 , seq_length = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 32 , image_height = 32 , ) ->Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(preprocessor(images=input_image , return_tensors=framework ) )
        return inputs
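# --- illustrative sketch (assumption: minimal stand-in, not the real PretrainedConfig) ---
# The attribute_map above lets callers read canonical names (hidden_size) while the config
# stores GPT-style names (n_embd); the mechanism is plain attribute redirection:
def _demo_attribute_map():
    class Cfg:
        attribute_map = {"hidden_size": "n_embd"}
        n_embd = 512

        def __getattr__(self, name):
            if name in self.attribute_map:
                return getattr(self, self.attribute_map[name])
            raise AttributeError(name)

    return Cfg().hidden_size  # 512, read through the alias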
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
from maths.prime_check import is_prime
def twin_prime( number ):
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
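# --- illustrative sketch (assumption: self-contained primality test, not maths.prime_check) ---
# twin_prime(5) returns 7 because 5 and 7 are both prime; twin_prime(7) returns -1
# because 9 is composite. A tiny standalone check of those two cases:
def _demo_twin_prime_cases():
    def _is_prime(n):
        return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))

    assert _is_prime(5) and _is_prime(7)  # so twin_prime(5) == 7
    assert not _is_prime(9)  # so twin_prime(7) == -1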
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
from datetime import datetime as dt
from github import Github
a__ : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    """simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
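# --- illustrative sketch (assumption: toy day counts, not part of the bot) ---
# The bot applies two windows to month-old issues: warn after 23 quiet days, then close
# 7 days after its own warning goes unanswered:
def _demo_staleness(days_quiet, days_old, bot_warned_last):
    if bot_warned_last and days_quiet > 7 and days_old >= 30:
        return "close"
    if days_quiet > 23 and days_old >= 30:
        return "warn"
    return "keep"

# _demo_staleness(10, 60, True) -> 'close'; _demo_staleness(25, 60, False) -> 'warn'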
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention_forwardGenerator_pass = False
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 32
@property
def __lowerCAmelCase ( self ) ->str:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Dict:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Tuple:
return 100
@property
def __lowerCAmelCase ( self ) ->int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : str = self.dummy_unet
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->int:
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[int] = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : Optional[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
SCREAMING_SNAKE_CASE : int = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : List[Any] = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Any = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : List[str] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
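# --- illustrative sketch (assumption: this mirrors, not reproduces, the helper's internals) ---
# assert_mean_pixel_difference scores two images by mean absolute pixel error, which
# tolerates small per-pixel noise that an exact comparison would reject:
def _demo_mean_pixel_difference(image, expected_image):
    import numpy as np

    return np.abs(image.astype(np.float64) - expected_image.astype(np.float64)).mean()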
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
    @slow
    def test_pegasus_xsum_summary( self ):
        model = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
        tokenizer = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
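# A minimal, self-contained illustration of the jit/no-jit equivalence pattern the
# test above relies on: compile a pure function with jax.jit, rerun it under
# jax.disable_jit(), and check both paths agree. The function below is made up for
# illustration and is not part of the test suite.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def scaled_sum(x, y):
        # any pure function of arrays works; jit traces and compiles it once
        return jnp.sum(x * 2.0 + y)

    x, y = jnp.ones((4,)), jnp.arange(4.0)
    jitted = scaled_sum(x, y)
    with jax.disable_jit():
        eager = scaled_sum(x, y)
    assert jnp.allclose(jitted, eager)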
| 313
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Runs four Stable Diffusion v1.x checkpoints side by side for comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker)

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Collect the first image from each checkpoint and return them via StableDiffusionPipelineOutput
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
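# A hedged usage sketch for the pipeline above. Loading it through
# `DiffusionPipeline.from_pretrained(..., custom_pipeline=...)` mirrors how
# diffusers community pipelines are typically used; the custom_pipeline name and
# checkpoint id here are assumptions, not guaranteed identifiers.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison")
    pipe.enable_attention_slicing()
    output = pipe("an astronaut riding a horse", num_inference_steps=25)
    images = output.images  # one image per v1.x checkpoint, in order 1.1-1.4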
| 313
| 1
|
def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place with odd-even transposition (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
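    # A quick self-check sketch (sample values are illustrative): odd-even
    # transposition sort should agree with Python's built-in sorted().
    import random

    sample = [random.randint(-100, 100) for _ in range(20)]
    assert odd_even_sort(list(sample)) == sorted(sample)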
| 313
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """Output of FlaxUNet2DConditionModel: the denoised sample."""

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """A conditional 2D UNet model implemented in Flax."""

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1):]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
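# A minimal shape-check sketch for the model above. The tiny block/channel
# settings are illustrative choices so the example runs quickly on CPU, not
# recommended defaults.
if __name__ == "__main__":
    model = FlaxUNet2DConditionModel(
        sample_size=8, block_out_channels=(32, 64), attention_head_dim=8, cross_attention_dim=32, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"))
    params = model.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 8, 8), dtype=jnp.float32)
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 1, 32), dtype=jnp.float32)
    out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
    assert out.sample.shape == (1, 4, 8, 8)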
| 313
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Iterable wrapper that yields the sum of all node values in a binary tree."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
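# A small usage sketch: sum the values of a three-node tree (names and values
# are illustrative).
if __name__ == "__main__":
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12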
| 313
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast tests for the StableUnCLIP image-to-image pipeline."""

    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1))

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000))

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed")
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the StableUnCLIP image-to-image pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(init_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 313
| 1
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for file downloads and caching in the download manager."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
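# A short usage sketch (values are illustrative): copy() deep-copies every
# attribute, so mutating the copy leaves the original config untouched.
if __name__ == "__main__":
    base_config = DownloadConfig(max_retries=3, proxies={"https": "http://proxy.local:8080"})
    derived_config = base_config.copy()
    derived_config.proxies["https"] = "http://other-proxy.local:8080"
    assert base_config.proxies["https"] == "http://proxy.local:8080"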
| 313
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.")

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class PhrasalConstraint(Constraint):
    """Constraint forcing an exact sequence of token ids to appear in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token-id sequences."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}.")

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
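# A brief, self-contained sketch of how the trie behaves; the token ids are
# made up for illustration.
if __name__ == "__main__":
    demo_trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [5, 6]])
    assert sorted(demo_trie.next_tokens([1, 2])) == [3, 4]
    assert demo_trie.reached_leaf([5, 6])
    assert demo_trie.count_leaves(demo_trie.trie) == 3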
class DisjunctiveConstraint(Constraint):
    """Constraint that is fulfilled once any one of several token-id sequences appears."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.")

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """Tracks progress through a list of constraints during beam-constrained generation."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true.")

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate the underlying self.constraints
        # objects during this process, so the new state starts from initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
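# A short end-to-end sketch: feed token ids one at a time into a
# ConstraintListState and watch it fulfill a single phrasal constraint. The
# token ids are made up for illustration.
if __name__ == "__main__":
    state = ConstraintListState([PhrasalConstraint([5, 6, 7])])
    for token in [5, 6, 7]:
        complete, stepped = state.add(token)
    assert state.completed and complete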
| 313
| 1
|
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n, using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum all semidivisible numbers up to `limit` (Project Euler problem 234)."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
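# A brute-force cross-check sketch for small limits. `is_semidivisible` is an
# illustrative helper (not part of the solution above): per Project Euler 234,
# a number n is semidivisible when exactly one of lps(n) and ups(n) divides it,
# where lps/ups are the largest/smallest primes whose square is <= / >= n.
if __name__ == "__main__":
    def is_semidivisible(n: int) -> bool:
        primes = prime_sieve(int(math.sqrt(n)) + 100)
        lps = max(p for p in primes if p * p <= n)
        ups = min(p for p in primes if p * p >= n)
        return (n % lps == 0) != (n % ups == 0)

    small_limit = 1_000
    assert solution(small_limit) == sum(n for n in range(4, small_limit + 1) if is_semidivisible(n))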
| 313
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1_026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs for training the secondary learner."""
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1_026, trim=trim)

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected (context, IG) pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path)

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1_000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2 with Information Gain Filtration, optionally gated by a secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    test_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(test_perp)
                    print("Test perplexity, step", global_step, ":", test_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ))
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ))
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set")
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1_000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner")
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from "
            "1 standard deviation above average to 1 below average after 10 batches"
        ))
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument(
        "--min_len", default=1_026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration")

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1_026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl")

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt")

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1_026, trim=True)

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1_000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt")


if __name__ == "__main__":
    main()
def UpperCAmelCase_( a__ = 1_000 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = 1, 1
SCREAMING_SNAKE_CASE : Any = []
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE : str = prev_numerator + 2 * prev_denominator
SCREAMING_SNAKE_CASE : Any = prev_numerator + prev_denominator
if len(str(a__ ) ) > len(str(a__ ) ):
result.append(a__ )
SCREAMING_SNAKE_CASE : Optional[int] = numerator
SCREAMING_SNAKE_CASE : Tuple = denominator
return len(a__ )
if __name__ == "__main__":
print(F"{solution() = }")
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCAmelCase_( a__ ):
"""simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = filter(lambda p : p.requires_grad , model.parameters() )
SCREAMING_SNAKE_CASE : List[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Any = logging.getLogger(__name__)
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if metric == "rouge2":
SCREAMING_SNAKE_CASE : str = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
SCREAMING_SNAKE_CASE : List[Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
SCREAMING_SNAKE_CASE : int = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
SCREAMING_SNAKE_CASE : int = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
SCREAMING_SNAKE_CASE : Dict = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=a__ , verbose=a__ , )
class a_ ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->None:
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
SCREAMING_SNAKE_CASE : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
SCREAMING_SNAKE_CASE : List[str] = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE : Any = od / '''test_results.txt'''
SCREAMING_SNAKE_CASE : Optional[int] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE : str = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
SCREAMING_SNAKE_CASE : Tuple = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_lowerCamelCase )
generations_file.parent.mkdir(exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , '''a+''' ) as writer:
for key in sorted(_lowerCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE : Tuple = metrics[key]
if isinstance(_lowerCamelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = val.item()
SCREAMING_SNAKE_CASE : Tuple = F"""{key}: {val:.6f}\n"""
writer.write(_lowerCamelCase )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE : Optional[Any] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(_lowerCamelCase )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
try:
SCREAMING_SNAKE_CASE : Any = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE : Optional[int] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE : int = count_trainable_parameters(_lowerCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_lowerCamelCase , _lowerCamelCase , '''test''' )
@rank_zero_only
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
a__ : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class a_ ( datasets.BuilderConfig ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
__SCREAMING_SNAKE_CASE : str = "utf-8"
__SCREAMING_SNAKE_CASE : Optional[str] = None
__SCREAMING_SNAKE_CASE : Optional[str] = None
__SCREAMING_SNAKE_CASE : bool = True # deprecated
__SCREAMING_SNAKE_CASE : Optional[int] = None # deprecated
__SCREAMING_SNAKE_CASE : int = 10 << 20 # 10MB
__SCREAMING_SNAKE_CASE : Optional[bool] = None
class a_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = JsonConfig
def __lowerCAmelCase ( self ) ->List[str]:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
SCREAMING_SNAKE_CASE : int = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE : Optional[Any] = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Tuple = [files]
SCREAMING_SNAKE_CASE : Dict = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
SCREAMING_SNAKE_CASE : str = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Any = [files]
SCREAMING_SNAKE_CASE : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def __lowerCAmelCase ( self , _lowerCamelCase ) ->pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
SCREAMING_SNAKE_CASE : List[str] = self.config.features.arrow_schema.field(_lowerCamelCase ).type
SCREAMING_SNAKE_CASE : List[Any] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : Any = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.load(_lowerCamelCase )
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE : Any = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
SCREAMING_SNAKE_CASE : Optional[int] = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE : Optional[Any] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE : Tuple = dataset
SCREAMING_SNAKE_CASE : Optional[int] = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE : Optional[Any] = max(self.config.chunksize // 32 , 16 << 10 )
SCREAMING_SNAKE_CASE : List[str] = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
SCREAMING_SNAKE_CASE : Any = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE : Dict = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
SCREAMING_SNAKE_CASE : str = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE : Optional[int] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE : Any = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE : Union[str, Any] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE : List[Any] = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def UpperCAmelCase_( a__ ):
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE : str = ord(a__ )
if not _is_chinese_char(a__ ):
return 0
return 1
def UpperCAmelCase_( a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE : str = len(a__ ) > 1 and is_chinese(a__ )
if chinese_word:
word_set.add(a__ )
SCREAMING_SNAKE_CASE : str = list(a__ )
return word_list
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE : List[str] = max([len(a__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE : Tuple = bert_tokens
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = 0, len(a__ )
while start < end:
SCREAMING_SNAKE_CASE : Dict = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE : Optional[int] = min(end - start , a__ )
for i in range(a__ , 1 , -1 ):
SCREAMING_SNAKE_CASE : Optional[int] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE : Optional[int] = '''##''' + bert_word[j]
SCREAMING_SNAKE_CASE : List[str] = start + i
SCREAMING_SNAKE_CASE : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
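# Worked example (a sketch, not part of the original file): given WordPiece tokens
# and an LTP-derived word set {"中国"}, `add_sub_symbol` rewrites the continuation
# characters of a matched Chinese word with the "##" prefix, which is what the
# whole-word-masking reference ids below are computed from:
#
#   add_sub_symbol(["中", "国", "人"], {"中国"})
#   # -> ["中", "##国", "人"]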
def UpperCAmelCase_( a__ , a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [get_chinese_word(a__ ) for r in res]
ltp_res.extend(a__ )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : Any = []
for i in range(0 , len(a__ ) , 100 ):
SCREAMING_SNAKE_CASE : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a__ , truncation=a__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(a__ ) == len(a__ )
SCREAMING_SNAKE_CASE : int = []
for input_ids, chinese_word in zip(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(a__ )
input_tokens.append(a__ )
SCREAMING_SNAKE_CASE : List[str] = add_sub_symbol(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(a__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE : Optional[int] = token[2:]
# save chinese tokens' pos
if len(a__ ) == 1 and _is_chinese_char(ord(a__ ) ):
ref_id.append(a__ )
ref_ids.append(a__ )
assert len(a__ ) == len(a__ )
return ref_ids
def UpperCAmelCase_( a__ ):
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : List[str] = f.readlines()
SCREAMING_SNAKE_CASE : Union[str, Any] = [line.strip() for line in data if len(a__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE : List[str] = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE : int = prepare_ref(a__ , a__ , a__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE : Tuple = [json.dumps(a__ ) + '''\n''' for ref in ref_ids]
f.writelines(a__ )
if __name__ == "__main__":
a__ : int = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
a__ : int = parser.parse_args()
main(args)
a__ : List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a__ : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a__ : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Tuple = '''1'''
SCREAMING_SNAKE_CASE : Union[str, Any] = '''f32le'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
SCREAMING_SNAKE_CASE : Tuple = ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
SCREAMING_SNAKE_CASE : Optional[Any] = output_stream[0]
SCREAMING_SNAKE_CASE : Any = np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
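# Minimal usage sketch (not part of the original file; `ffmpeg_read` is an assumed
# name for the helper above): decode the raw bytes of any container ffmpeg
# understands into a mono float32 waveform at the requested sampling rate.
#
#   with open("sample.flac", "rb") as f:
#       waveform = ffmpeg_read(f.read(), 16_000)
#   # waveform is a 1-D np.float32 array of PCM samples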
def UpperCAmelCase_( a__ , a__ , a__ = "f32le" , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{sampling_rate}"""
SCREAMING_SNAKE_CASE : Dict = '''1'''
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Dict = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = platform.system()
if system == "Linux":
SCREAMING_SNAKE_CASE : Dict = '''alsa'''
SCREAMING_SNAKE_CASE : Any = '''default'''
elif system == "Darwin":
SCREAMING_SNAKE_CASE : Union[str, Any] = '''avfoundation'''
SCREAMING_SNAKE_CASE : Optional[int] = ''':0'''
elif system == "Windows":
SCREAMING_SNAKE_CASE : int = '''dshow'''
SCREAMING_SNAKE_CASE : Any = '''default'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
SCREAMING_SNAKE_CASE : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
SCREAMING_SNAKE_CASE : List[Any] = _ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCAmelCase_( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
SCREAMING_SNAKE_CASE : Tuple = stream_chunk_s
else:
SCREAMING_SNAKE_CASE : List[str] = chunk_length_s
SCREAMING_SNAKE_CASE : Union[str, Any] = ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
SCREAMING_SNAKE_CASE : Optional[int] = np.intaa
SCREAMING_SNAKE_CASE : List[Any] = 2
elif format_for_conversion == "f32le":
SCREAMING_SNAKE_CASE : Any = np.floataa
SCREAMING_SNAKE_CASE : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
SCREAMING_SNAKE_CASE : Optional[Any] = chunk_length_s / 6
SCREAMING_SNAKE_CASE : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
SCREAMING_SNAKE_CASE : List[Any] = [stride_length_s, stride_length_s]
SCREAMING_SNAKE_CASE : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = datetime.datetime.now()
SCREAMING_SNAKE_CASE : Dict = datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
SCREAMING_SNAKE_CASE : Dict = np.frombuffer(item['''raw'''] , dtype=a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
SCREAMING_SNAKE_CASE : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def UpperCAmelCase_( a__ , a__ , a__ , a__ = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = b''''''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
SCREAMING_SNAKE_CASE : List[str] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
SCREAMING_SNAKE_CASE : str = (_stride_left, stride_right)
SCREAMING_SNAKE_CASE : List[str] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
SCREAMING_SNAKE_CASE : List[str] = False
yield item
SCREAMING_SNAKE_CASE : Dict = stride_left
SCREAMING_SNAKE_CASE : int = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
SCREAMING_SNAKE_CASE : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
SCREAMING_SNAKE_CASE : Union[str, Any] = False
yield item
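# A minimal sketch (not part of the original file) of how `chunk_bytes_iter`
# overlaps chunks: with chunk_len=4 and stride=(1, 1), neighbouring chunks share
# one byte on each side, so a consumer can decode each chunk and drop the strided
# edges without losing samples at the boundaries.
#
#   for item in chunk_bytes_iter(iter([b"0123456789"]), 4, (1, 1)):
#       print(item["raw"], item["stride"])
#   # b'0123' (0, 1)
#   # b'2345' (1, 1)
#   # b'4567' (1, 1)
#   # b'6789' (1, 1)
#   # b'89'   (1, 0)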
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 2**24 # 16Mo
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
SCREAMING_SNAKE_CASE : str = ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
from maths.prime_check import is_prime
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if not isinstance(a__ , a__ ):
SCREAMING_SNAKE_CASE : List[Any] = F"""Input value of [number={number}] must be an integer"""
raise TypeError(a__ )
if is_prime(a__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
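# Spot checks (a sketch, not part of the original file; `twin_prime` is an assumed
# name for the function above): it returns number + 2 when both number and
# number + 2 are prime, otherwise -1.
#
#   twin_prime(3)   # -> 5   (3 and 5 are twin primes)
#   twin_prime(4)   # -> -1  (4 is not prime)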
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
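# Usage sketch (not part of the original file): with the `_LazyModule` indirection
# in place, a plain attribute import only triggers the real import of
# `modeling_wavlm` (and hence torch) on first access.
#
#   from transformers import WavLMModel                          # resolved lazily
#   model = WavLMModel.from_pretrained("microsoft/wavlm-base")   # example checkpoint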
from __future__ import annotations
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE : Optional[Any] = int(a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(a__ )
SCREAMING_SNAKE_CASE : list[str] = []
for temp in range(int(a__ ) ):
series.append(F"""1 / {pow(temp + 1 , int(a__ ) )}""" if series else '''1''' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = int(input('''Enter the last number (nth term) of the P-Series'''))
a__ : List[Any] = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
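# Worked example (a sketch, not part of the original file): the first three terms
# of the P-series with p = 2 are 1, 1/4 and 1/9, rendered as strings:
#
#   p_series(3, 2)
#   # -> ['1', '1 / 4', '1 / 9']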
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : int = logging.get_logger(__name__)
a__ : Optional[Any] = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = 'deformable_detr'
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=3 , _lowerCamelCase=300 , _lowerCamelCase=1024 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=6 , _lowerCamelCase=1024 , _lowerCamelCase=8 , _lowerCamelCase=0.0 , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=256 , _lowerCamelCase=0.1 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1.0 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="sine" , _lowerCamelCase="resnet50" , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=False , _lowerCamelCase=300 , _lowerCamelCase=False , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=1 , _lowerCamelCase=5 , _lowerCamelCase=2 , _lowerCamelCase=0.1 , _lowerCamelCase=0.2_5 , _lowerCamelCase=False , **_lowerCamelCase , ) ->Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE : int = config_class.from_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = use_timm_backbone
SCREAMING_SNAKE_CASE : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = d_model
SCREAMING_SNAKE_CASE : str = encoder_ffn_dim
SCREAMING_SNAKE_CASE : str = encoder_layers
SCREAMING_SNAKE_CASE : str = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : int = decoder_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE : List[str] = dropout
SCREAMING_SNAKE_CASE : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE : str = activation_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_function
SCREAMING_SNAKE_CASE : Optional[int] = init_std
SCREAMING_SNAKE_CASE : List[str] = init_xavier_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_loss
SCREAMING_SNAKE_CASE : List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE : str = backbone
SCREAMING_SNAKE_CASE : Dict = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = dilation
# deformable attributes
SCREAMING_SNAKE_CASE : str = num_feature_levels
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_n_points
SCREAMING_SNAKE_CASE : Any = decoder_n_points
SCREAMING_SNAKE_CASE : str = two_stage
SCREAMING_SNAKE_CASE : List[str] = two_stage_num_proposals
SCREAMING_SNAKE_CASE : Dict = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE : int = class_cost
SCREAMING_SNAKE_CASE : Union[str, Any] = bbox_cost
SCREAMING_SNAKE_CASE : Optional[int] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE : Dict = mask_loss_coefficient
SCREAMING_SNAKE_CASE : Union[str, Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE : Tuple = giou_loss_coefficient
SCREAMING_SNAKE_CASE : Optional[int] = eos_coefficient
SCREAMING_SNAKE_CASE : Tuple = focal_alpha
SCREAMING_SNAKE_CASE : Optional[int] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase )
@property
def __lowerCAmelCase ( self ) ->int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) ->int:
return self.d_model
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE : Any = self.__class__.model_type
return output
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( a__ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = KandinskyImgaImgPipeline
__SCREAMING_SNAKE_CASE : str = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__SCREAMING_SNAKE_CASE : int = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : List[Any] = False
@property
def __lowerCAmelCase ( self ) ->int:
return 32
@property
def __lowerCAmelCase ( self ) ->List[str]:
return 32
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) ->Tuple:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) ->Optional[int]:
return 100
@property
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) ->Tuple:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Dict = MultilingualCLIP(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def __lowerCAmelCase ( self ) ->List[str]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) ->Optional[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Any = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_unet
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=0 ) ->str:
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : str = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
if str(_lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : str = '''cpu'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Dict = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : str = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE : Any = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : str = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Dict = pipeline(
_lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,)
__SCREAMING_SNAKE_CASE : Optional[int] = 10
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_lowerCamelCase )
return config
def __lowerCAmelCase ( self ) ->Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Any:
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2
assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample
SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = output.prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = '''▁'''
a__ : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a__ : List[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
a__ : Any = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
a__ : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : List[Any] = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : List[int] = []
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
SCREAMING_SNAKE_CASE : str = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE : Dict = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE : List[str] = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.sp_model )
SCREAMING_SNAKE_CASE : Optional[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE : Any = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE : List[Any] = src_lang if src_lang is not None else '''en_XX'''
SCREAMING_SNAKE_CASE : List[str] = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCAmelCase ( self ) ->int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCAmelCase ( self ) ->str:
return self._src_lang
@src_lang.setter
def __lowerCAmelCase ( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) ->Dict:
SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self , _lowerCamelCase ) ->None:
SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Tuple = ''''''
SCREAMING_SNAKE_CASE : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCamelCase ) + token
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Optional[int] = []
else:
current_sub_tokens.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Dict = False
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE : int = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
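    # A sketch of the resulting layout (not part of the original file): mBART-50
    # prefixes the source language code and appends </s>, so a single sequence is
    # tokenized as
    #
    #   [src_lang_code] tok_1 ... tok_n </s>
    #
    # and a pair as [src_lang_code] A B </s>, with both segments sharing one suffix.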
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to the source-language setting: [src_lang_code] text [eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to the target-language setting: [tgt_lang_code] text [eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
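
# A minimal usage sketch for the class above, assuming it is an MBart-50-style
# tokenizer; the `MBart50Tokenizer` name and checkpoint below are illustrative
# stand-ins, since the class name is not visible in this fragment.
#
#   from transformers import MBart50Tokenizer
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer.prepare_seq2seq_batch(
#       ["Hello world"], tgt_texts=["Salut lume"], return_tensors="pt"
#   )
#   # input_ids start with the en_XX language-code id and end with </s>,
#   # matching set_src_lang_special_tokens above.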
| 313
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), using byte-level
    Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """`str`: Mask token. Logs an error if used while not having been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
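    # Illustrative guard behavior: calling the tokenizer with
    # is_split_into_words=True on an instance constructed with
    # add_prefix_space=False raises the ValueError above before any encoding runs.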
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
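    # LED, like BART/RoBERTa, does not make use of token type ids, so the list
    # returned above is all zeros for single sequences and (hypothetical) pairs alike.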
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
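
# A minimal sketch of the `global_attention_mask` padding behavior above; the
# checkpoint is the one referenced in the pretrained maps, and the usage is a
# hedged illustration rather than part of this module.
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer(["short text", "a noticeably longer input sentence"])
#   # mark the first token of each sequence for global attention
#   enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
#   padded = tokenizer.pad(enc, padding="longest")
#   # the shorter global attention mask is extended with -1 (padding positions),
#   # not 0 (local attention), so padding stays distinguishable.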
| 313
| 1