import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
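
# Usage sketch (illustrative, not part of the original module), assuming a
# Flax model class and a local, unsharded PyTorch checkpoint on disk:
#
#     from transformers import FlaxBertModel
#
#     flax_model = FlaxBertModel.from_pretrained("bert-base-uncased")
#     flax_params = load_pytorch_checkpoint_in_flax_state_dict(
#         flax_model, "path/to/pytorch_model.bin", is_sharded=False
#     )
#     flax_model.params = flax_params
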

def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
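
# A few concrete renames the function above performs (illustrative key tuples):
#   ("layer_norm", "weight")      -> ("layer_norm", "scale")
#   ("word_embeddings", "weight") -> ("word_embeddings", "embedding")
#   ("dense", "weight")           -> ("dense", "kernel"), tensor transposed
#   4D conv "weight"              -> "kernel", axes permuted to (2, 3, 1, 0)
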

def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)

def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)

def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)

def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
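
# The reverse direction (illustrative sketch): load a serialized Flax msgpack
# checkpoint into an existing PyTorch model instance.
#
#     from transformers import BertModel
#
#     pt_model = BertModel.from_pretrained("bert-base-uncased")
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")
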
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml

class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")

known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
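
# Usage sketch (illustrative): round-trip the YAML metadata block of a dataset
# card; "pretty_name" is just an example metadata key.
#
#     metadata = DatasetMetadata.from_readme(Path("README.md"))
#     metadata["pretty_name"] = "My Dataset"
#     metadata.to_readme(Path("README.md"))
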
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification

def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
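
# For example (illustrative), a fused timm key
#     "layers.0.blocks.1.attn.qkv.weight" with shape (3 * C, C)
# is split by convert_state_dict into the three HF keys
#     "swinv2.encoder.layers.0.blocks.1.attention.self.{query,key,value}.weight",
# each of shape (C, C), via the row slices [:C], [C:2C] and [-C:].
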

def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
import argparse
from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()

def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
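
# Example invocation (illustrative; the checkpoint path should end in
# "model.ckpt", since the tokenizer vocab is located by stripping those
# last 10 characters and appending "vocab.txt"):
#
#     python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path /path/to/model.ckpt \
#         --tapas_config_file /path/to/tapas_config.json \
#         --pytorch_dump_path /path/to/output
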
from scipy.stats import spearmanr
import datasets
A = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
A = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
A = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) with a * x + b * y == gcd(a, b).
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    # Finds n with n % n1 == r1 and n % n2 == r2 for coprime n1, n2,
    # using the Bezout coefficients from the extended Euclidean algorithm.
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    # Returns b with (a * b) % n == 1.
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    # Same result as chinese_remainder_theorem, but built from modular inverses.
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
a ="""0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_inflect_available,
    is_invisible_watermark_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_librosa_available,
    is_note_seq_available,
    is_onnx_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
    is_transformers_available,
    is_transformers_version,
    is_unidecode_available,
    logging,
)

try:
    if not is_onnx_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipelines import OnnxRuntimeModel
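
# This guard pattern repeats for every optional-dependency group below: probe
# the dependency, and on failure import placeholder "dummy" objects that only
# raise a helpful error when actually used, so `import diffusers` stays cheap
# and works without the optional extras installed.
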
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *  # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
    from .optimization import (
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
        get_scheduler,
    )
    from .pipelines import (
        AudioPipelineOutput,
        ConsistencyModelPipeline,
        DanceDiffusionPipeline,
        DDIMPipeline,
        DDPMPipeline,
        DiffusionPipeline,
        DiTPipeline,
        ImagePipelineOutput,
        KarrasVePipeline,
        LDMPipeline,
        LDMSuperResolutionPipeline,
        PNDMPipeline,
        RePaintPipeline,
        ScoreSdeVePipeline,
    )
    from .schedulers import (
        CMStochasticIterativeScheduler,
        DDIMInverseScheduler,
        DDIMParallelScheduler,
        DDIMScheduler,
        DDPMParallelScheduler,
        DDPMScheduler,
        DEISMultistepScheduler,
        DPMSolverMultistepInverseScheduler,
        DPMSolverMultistepScheduler,
        DPMSolverSinglestepScheduler,
        EulerAncestralDiscreteScheduler,
        EulerDiscreteScheduler,
        HeunDiscreteScheduler,
        IPNDMScheduler,
        KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
        PNDMScheduler,
        RePaintScheduler,
        SchedulerMixin,
        ScoreSdeVeScheduler,
        UnCLIPScheduler,
        UniPCMultistepScheduler,
        VQDiffusionScheduler,
    )
    from .training_utils import EMAModel

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .schedulers import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .schedulers import DPMSolverSDEScheduler

try:
    if not (is_torch_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )

try:
    if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionKDiffusionPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )

try:
    if not (is_torch_available() and is_librosa_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
else:
    from .pipelines import AudioDiffusionPipeline, Mel

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .pipelines import SpectrogramDiffusionPipeline

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_objects import *  # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
    from .schedulers import (
        FlaxDDIMScheduler,
        FlaxDDPMScheduler,
        FlaxDPMSolverMultistepScheduler,
        FlaxKarrasVeScheduler,
        FlaxLMSDiscreteScheduler,
        FlaxPNDMScheduler,
        FlaxSchedulerMixin,
        FlaxScoreSdeVeScheduler,
    )

try:
    if not (is_flax_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )

try:
    if not (is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_note_seq_objects import *  # noqa F403
else:
    from .pipelines import MidiProcessor
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
    title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
    author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
    journal={arXiv preprint arXiv:1905.00537},
    year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""

def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    # Computes exact match (per question), per-question macro-F1 (f1_m) and
    # overall answer-level F1 (f1_a) for MultiRC predictions.
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser

def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
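
# Example invocations (illustrative), matching the subcommands registered above:
#     accelerate config   # interactive configuration
#     accelerate env      # print environment info
#     accelerate launch train_script.py --some_arg value
#     accelerate test     # run a sanity-check training loop
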
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin

@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """simple docstring"""

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """simple docstring"""

    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """simple docstring"""

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
# Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
if os.path.exists(__lowerCamelCase ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run" )
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self, i):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
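

# Hedged sketch of the cache-under-FileLock pattern used in SquadDataset.__init__
# above: the first process builds and saves the features while concurrent processes
# block on the lock, then read the cached file. `load_or_build` and `build_fn` are
# illustrative names, not transformers API.
def load_or_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        payload = build_fn()
        torch.save(payload, cache_path)
        return payload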
| 5
| 1
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """simple docstring"""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
"""simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """simple docstring"""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        """simple docstring"""
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        """simple docstring"""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        """simple docstring"""
        raise NotImplementedError

    def run_tf(self):
        """simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
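
    # Hedged usage sketch (comments only): how the subcommand above would be wired
    # into a CLI entry point and dispatched. The argv values are illustrative.
    #   parser = ArgumentParser("transformers-cli")
    #   commands = parser.add_subparsers()
    #   TrainCommand.register_subcommand(commands)
    #   args = parser.parse_args(["train", "--train_data", "train.csv"])
    #   args.func(args).run()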
| 470
|
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string end after the previously explored end (that is r)?
        # if yes then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
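
# Worked example for palindromic_string above: "abacab" is augmented to
# "a|b|a|c|a|b"; the largest radius sits at the center "c", and stripping the
# "|" separators back out yields the longest palindromic substring:
#   palindromic_string("abacab")  # -> 'bacab'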
| 470
| 1
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
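
# Quick check of the formula above: at 60 degrees, cos^2(60°) = 0.25, so
# malus_law(100.0, 60.0) returns approximately 25.0.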
| 363
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 363
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    '''simple docstring'''
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """simple docstring"""

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
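

# Hedged sketch of the importlib-based lookup that image_processor_class_from_name
# performs above: map a model type to its module, import it lazily, and fetch the
# class by name. The helper name is illustrative.
def _resolve_processor(module_name: str, class_name: str):
    module = importlib.import_module(f".{module_name}", "transformers.models")
    return getattr(module, class_name, None)

# For example, _resolve_processor("clip", "CLIPImageProcessor") returns the CLIP
# image processor class when transformers is installed with vision support.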
| 513
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output, timestep, sample, return_dict=True):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample, *args, **kwargs):
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
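

# The branches in `step` above are Adams-Bashforth linear-multistep coefficients:
# with previous epsilons e1..ek (newest first), order 4 uses
# (55*e1 - 59*e2 + 37*e3 - 9*e4) / 24. A quick consistency check that each
# coefficient row sums to 1, as a multistep rule requires:
for _coeffs in ([1.0], [3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
    assert abs(sum(_coeffs) - 1.0) < 1e-12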
| 513
| 1
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
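

# Hedged usage sketch (comments only): with the mappings above, each auto class
# dispatches on the checkpoint's config type, e.g.
#   from transformers import FlaxAutoModelForSeq2SeqLM
#   model = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-base")
# which resolves to FlaxT5ForConditionalGeneration via
# FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.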
| 702
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''
        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')
        if len(controlnets) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.''')
        return cls(controlnets)
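

# Hedged sketch of the residual merge in MultiControlNetModel.forward above: the
# per-block down residuals are summed elementwise across controlnets and the mid
# residual is accumulated. Toy tensors stand in for real ControlNet outputs.
def _demo_merge():
    down_a = [torch.ones(2, 4), torch.ones(2, 4)]
    down_b = [torch.full((2, 4), 2.0), torch.full((2, 4), 2.0)]
    merged = [prev + curr for prev, curr in zip(down_a, down_b)]  # each entry all 3s
    mid = torch.zeros(2, 4)
    for mid_sample in (torch.ones(2, 4), torch.ones(2, 4)):
        mid += mid_sample  # accumulated mid residual, all 2s
    return merged, mid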
| 320
| 0
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors='''np''').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='''np''').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors='''np''', return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''max_length''', max_length=4, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''longest''', max_length=4, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='''longest''', max_length=16, truncation=True, return_tensors='''np''', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected_input_features = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors='''pt''').input_features
        self.assertEquals(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :24], expected_input_features, atol=1e-4))
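
    # Hedged sketch of the per-utterance normalization the tests above verify: each
    # feature matrix is shifted and scaled so every mel bin has zero mean and unit
    # variance over its un-padded frames. The helper name is illustrative.
    @staticmethod
    def _reference_cmvn(features):
        mean = features.mean(axis=0)
        std = features.std(axis=0)
        return (features - mean) / (std + 1e-10)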
| 467
|
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'{solution() = }')
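
# Worked check of the recurrence above: the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, ...], so the numerators of the convergents run
# 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, ... The 10th numerator is 1457,
# whose digit sum is 1 + 4 + 5 + 7 = 17, i.e. solution(10) == 17.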
| 264
| 0
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
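
# A minimal usage sketch of the processor above (the image path is an
# illustrative assumption; BaseImageProcessor.__call__ dispatches to preprocess):
#
#     from PIL import Image
#     processor = LevitImageProcessor()
#     image = Image.open("example.jpg")
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)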
| 147
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is already the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
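        # Reading the last case above: the segment id starts at 1 and flips at
        # each separator token (101), so [1, 101, 3, 4, 101, 6] yields
        # [1, 0, 0, 0, 1, 1].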
| 147
| 1
|
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
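
# For the adjacency list above, removing vertex 2, 3 or 5 disconnects the
# graph, so the script should print the articulation points 2, 3 and 5.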
| 590
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
#                  stream_logs=True)
| 590
| 1
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
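
# A minimal usage sketch (the data directory and checkpoint name are
# illustrative assumptions; `data_dir` must contain the SQuAD .json files):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_dataset = SquadDataset(args, tokenizer, mode="train")
#     inputs = train_dataset[0]  # dict of input_ids / attention_mask / ...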
| 711
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
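
# Example: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.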
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
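
# A minimal usage sketch (checkpoint name as published on the Hugging Face Hub;
# the example sentence is an illustrative assumption):
#
#     tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#     tokenizer.tokenize("Tôi là sinh_viên")  # BPE pieces with "@@" continuations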
| 93
| 0
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 295
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 295
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
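
# A minimal usage sketch (checkpoint name as published on the Hugging Face Hub):
#
#     tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#     tokenizer("Hello world")["input_ids"]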
| 612
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 612
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 403
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
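
# Example invocation (all paths are illustrative):
#
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./rembert/model.ckpt \
#         --rembert_config_file ./rembert/config.json \
#         --pytorch_dump_path ./rembert/pytorch_model.bin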
| 403
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
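# Minimal usage sketch: with the defaults above, the derived channel dimension after
# the last stage is embed_dim * 2 ** (num_stages - 1) = 96 * 2 ** 3 = 768, e.g.
#   config = Swinv2Config(image_size=256, window_size=8)
#   assert config.hidden_size == 768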
| 38
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create an all-zeros mask; BlenderbotSmall does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
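# Sketch of the special-token layout built above (5, 6, 7, 8 are hypothetical token
# ids): a single sequence [5, 6] becomes [bos, 5, 6, eos]; a pair [5, 6] + [7, 8]
# becomes [bos, 5, 6, eos, eos, 7, 8, eos], and every position gets token type id 0.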
| 38
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
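# Example invocation (paths are hypothetical); the fairseq checkpoint's directory is
# also expected to contain sentencepiece.bpe.model, and a data_bin/dict.txt must be
# reachable from the working directory:
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/output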
| 118
|
from manim import *
class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
lowercase : Optional[int] = [mem.copy() for i in range(6 )]
lowercase : Optional[int] = [mem.copy() for i in range(6 )]
lowercase : int = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : Optional[Any] = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : List[str] = VGroup(_A , _A ).arrange(_A , buff=0 )
lowercase : Any = Text('''CPU''' , font_size=24 )
lowercase : Union[str, Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_A )
lowercase : Any = [mem.copy() for i in range(4 )]
lowercase : Dict = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : int = Text('''GPU''' , font_size=24 )
lowercase : Optional[int] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
gpu.move_to([-1, -1, 0] )
self.add(_A )
lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase : Any = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : Union[str, Any] = Text('''Model''' , font_size=24 )
lowercase : Union[str, Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
model.move_to([3, -1.0, 0] )
self.add(_A )
lowercase : Optional[int] = []
lowercase : str = []
lowercase : Tuple = []
for i, rect in enumerate(_A ):
rect.set_stroke(_A )
lowercase : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_A )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_A , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_A , buff=0.0 )
self.add(_A )
model_cpu_arr.append(_A )
self.add(*_A , *_A , *_A )
lowercase : List[Any] = [mem.copy() for i in range(6 )]
lowercase : str = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : List[str] = Text('''Loaded Checkpoint''' , font_size=24 )
lowercase : Tuple = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
checkpoint.move_to([3, 0.5, 0] )
self.add(_A )
lowercase : int = []
lowercase : List[str] = []
for i, rect in enumerate(_A ):
lowercase : int = fill.copy().set_fill(_A , opacity=0.7 )
target.move_to(_A )
ckpt_arr.append(_A )
lowercase : Tuple = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_A )
self.add(*_A , *_A )
lowercase : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : Tuple = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_A , _A )
lowercase : Optional[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_A )
lowercase : Union[str, Any] = MarkupText(
f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowercase : List[str] = [meta_mem.copy() for i in range(6 )]
lowercase : str = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : Optional[Any] = VGroup(*_A ).arrange(_A , buff=0 )
lowercase : Union[str, Any] = VGroup(_A , _A ).arrange(_A , buff=0 )
lowercase : Tuple = Text('''Disk''' , font_size=24 )
lowercase : List[Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_A , run_time=3 ) , Write(_A , run_time=1 ) , Create(_A , run_time=1 ) )
lowercase : Any = []
for i, rect in enumerate(_A ):
lowercase : str = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_A , run_time=1.5 ) )
self.play(*_A )
self.play(FadeOut(_A ) )
lowercase : List[Any] = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_A , run_time=3 ) )
self.play(
FadeOut(_A , _A , *_A , *_A ) , )
self.wait()
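# The scene above animates big-model loading: checkpoint shards stream into
# np.memmaps on disk (or onto a device), and the in-memory copy of the checkpoint is
# then released through garbage collection, as the two step captions describe.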
| 217
| 0
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = FalconModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_, attention_mask=lowerCAmelCase_, encoder_hidden_states=lowerCAmelCase_, encoder_attention_mask=lowerCAmelCase_, )
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_, attention_mask=lowerCAmelCase_, encoder_hidden_states=lowerCAmelCase_, )
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, attention_mask=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = FalconForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, attention_mask=lowerCAmelCase_, labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, _a, _a, _a, ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = FalconForCausalLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_, attention_mask=lowerCAmelCase_, encoder_hidden_states=lowerCAmelCase_, encoder_attention_mask=lowerCAmelCase_, use_cache=lowerCAmelCase_, )
__SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3), vocab_size=2 )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask], dim=-1 )
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_, attention_mask=lowerCAmelCase_, encoder_hidden_states=lowerCAmelCase_, encoder_attention_mask=lowerCAmelCase_, output_hidden_states=lowerCAmelCase_, )["hidden_states"][0]
__SCREAMING_SNAKE_CASE = model(
lowerCAmelCase_, attention_mask=lowerCAmelCase_, encoder_hidden_states=lowerCAmelCase_, encoder_attention_mask=lowerCAmelCase_, past_key_values=lowerCAmelCase_, output_hidden_states=lowerCAmelCase_, )["hidden_states"][0]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase_, lowerCAmelCase_, atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, attention_mask=lowerCAmelCase_, labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "single_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, attention_mask=lowerCAmelCase_, labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = FalconForCausalLM(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, use_cache=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = input_ids.shape[0]
__SCREAMING_SNAKE_CASE = model._convert_to_rw_cache(result.past_key_values )
__SCREAMING_SNAKE_CASE = model._convert_cache_to_standard_format(lowerCAmelCase_, lowerCAmelCase_ )
for layer in range(len(lowerCAmelCase_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = "multi_label_classification"
__SCREAMING_SNAKE_CASE = input_dict["input_ids"]
__SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float )
__SCREAMING_SNAKE_CASE = FalconForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase_, attention_mask=lowerCAmelCase_, labels=lowerCAmelCase_ )
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> List[Any]:
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCAmelCase_, "use_cache" ):
return
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ )
if "use_cache" not in inputs:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__SCREAMING_SNAKE_CASE = (
getattr(lowerCAmelCase_, "decoder_layers", lowerCAmelCase_ )
or getattr(lowerCAmelCase_, "num_decoder_layers", lowerCAmelCase_ )
or config.num_hidden_layers
)
__SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_, "num_kv_heads", config.num_attention_heads )
__SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase_, "d_model", config.hidden_size )
__SCREAMING_SNAKE_CASE = embed_dim // num_attention_heads
__SCREAMING_SNAKE_CASE = outputs["past_key_values"]
self.assertEqual(len(lowerCAmelCase_ ), lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = inputs["input_ids"].shape
for i in range(lowerCAmelCase_ ):
if config.new_decoder_architecture:
__SCREAMING_SNAKE_CASE = config.num_attention_heads
elif config.multi_query:
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(len(past_kv[0] ), 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
__SCREAMING_SNAKE_CASE = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer("My favorite food is", return_tensors="pt" ).to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
__SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase_, do_sample=lowerCAmelCase_, max_new_tokens=19 )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase_ )[0]
self.assertEqual(lowerCAmelCase_, lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = FalconForCausalLM.from_pretrained(lowerCAmelCase_ )
model.eval()
model.to(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer("My favorite food is", return_tensors="pt" ).to(lowerCAmelCase_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCAmelCase_, do_sample=lowerCAmelCase_, max_new_tokens=4 )
model.generate(**lowerCAmelCase_, do_sample=lowerCAmelCase_, max_new_tokens=4 )
model.generate(**lowerCAmelCase_, num_beams=2, max_new_tokens=4 )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = FalconForCausalLM.from_pretrained(lowerCAmelCase_ )
model.eval()
model.to(device=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer("My favorite food is", return_tensors="pt" ).to(lowerCAmelCase_ )
# Test results are the same with and without cache
__SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase_, do_sample=lowerCAmelCase_, max_new_tokens=20, use_cache=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase_, do_sample=lowerCAmelCase_, max_new_tokens=20, use_cache=lowerCAmelCase_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
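# Note on the check above: with sampling disabled, generation is deterministic, so
# decoding with and without the KV cache must produce token-for-token identical ids,
# which is why the elementwise difference of the two output tensors can be required to be 0.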
| 712
|
def check_bouncy(n: int) -> bool:
    """Return True if the digits of n are neither sorted ascending nor descending."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
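# Worked examples (Project Euler #112): 134468 has non-decreasing digits and 66420
# has non-increasing digits, so neither is bouncy, while 155349 is; the least number
# at which the bouncy proportion first reaches 50% is 538.
#   assert check_bouncy(155349) and not check_bouncy(134468)
#   assert solution(50) == 538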
| 214
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 330
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 71
| 0
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n    year={2020},\n    booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
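# Usage sketch (mirrors the examples in _KWARGS_DESCRIPTION above):
#   metric = datasets.load_metric("indic_glue", "wnli")
#   metric.compute(predictions=[0, 1], references=[0, 1])  # {'accuracy': 1.0}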
| 705
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n    title={Measuring Mathematical Problem Solving With the MATH Dataset},\n    author={Dan Hendrycks\n    and Collin Burns\n    and Saurav Kadavath\n    and Akul Arora\n    and Steven Basart\n    and Eric Tang\n    and Dawn Song\n    and Jacob Steinhardt},\n    journal={arXiv preprint arXiv:2103.03874},\n    year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, references, predictions):
        """Returns the accuracy after canonicalizing each prediction/reference pair."""
        n_correct = 0.0
        for i, j in zip(references, predictions):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(references)
        return {
            "accuracy": accuracy,
        }
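# Usage sketch (mirrors the docstring above): inputs are canonicalized before
# comparison, so "1/2" and "\\frac{1}{2}" count as equivalent answers.
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # {'accuracy': 1.0}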
| 343
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by a majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
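# With k=5 (the default above), the five nearest training points vote by majority.
# The demo point [4.4, 3.1, 1.3, 1.4] lies in the short-petal region of the iris
# data, so it is typically classified as setosa (subject to the random train split).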
| 665
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
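# With the lazy module installed in sys.modules, `from transformers.models.deberta
# import DebertaModel` defers the heavy torch/TF imports until first attribute access.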
| 665
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
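# Sketch of the dummy-object mechanism: touching the class above (instantiation or
# attribute access) raises an ImportError asking the user to install `keras_nlp`.
#   TFGPT2Tokenizer()  # -> ImportError mentioning the missing keras_nlp backend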
| 716
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution (optionally from unnormalized scores)."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor as a layer x head table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
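# The importance score above follows Michel et al., "Are Sixteen Heads Really Better
# than One?" (http://arxiv.org/abs/1905.10650): with a differentiable mask over each
# head, |d(loss)/d(mask)| accumulated over the eval set ranks head importance.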
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of the downstream score, use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percent)', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info('Final head mask')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
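# Hedged sketch of one greedy masking step on synthetic scores: zero out the
# least-important entries through a flat view of the mask, as the loop above does.
_toy_scores = torch.tensor([[0.9, 0.1, 0.8, 0.2], [0.7, 0.3, 0.6, 0.4]])
_toy_mask = torch.ones(2, 4)
_order = _toy_scores.view(-1).sort()[1]  # ascending: least important first
_toy_mask.view(-1)[_order[:2]] = 0.0     # the flat view shares storage with _toy_mask
assert _toy_mask.sum() == 6.0            # two of the eight heads are now masked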
def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent', original_time / new_time * 100)
    save_model(model, args.output_dir)
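# For reference, `model.prune_heads` above is the standard
# `PreTrainedModel.prune_heads` API, which takes {layer index: [head indices
# to remove]}. Hedged usage sketch (the "gpt2" checkpoint is only an example,
# left commented out so nothing is downloaded at import time):
#
#     from transformers import GPT2LMHeadModel
#     gpt2 = GPT2LMHeadModel.from_pretrained("gpt2")
#     gpt2.prune_heads({0: [0, 2], 5: [1]})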
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.', )
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.', )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from s3', )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help="Don't normalize importance score by layers")
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true', help="Don't normalize all importance scores between 0 and 1", )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask heads until a threshold of accuracy.')
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float, help='Masking threshold in terms of the metric (stop masking when metric < threshold * original metric value).', )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Fraction of heads to mask at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=128, type=int, help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ), )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 507
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim the leading and trailing blank lines of the delimited block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : Dict = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 89
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """simple docstring"""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    with open(filepath) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
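# Tiny worked example of the class above (edge weights are made up): the
# minimum spanning tree of this 4-vertex graph keeps (0,2)=1, (1,2)=2 and
# (1,3)=5, for a total weight of 8.
_demo_graph = Graph({0, 1, 2, 3}, {(0, 1): 4, (0, 2): 1, (1, 2): 2, (1, 3): 5})
assert sum(_demo_graph.prims_algorithm().edges.values()) == 8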
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
| 0
|
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or castable'
            ' to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
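# Round-trip check on a tiny input (expected values computed by hand: the
# sorted rotations of 'banana' end in 'nnbaaa', and 'banana' sorts to index 3):
_check = bwt_transform('banana')
assert _check['bwt_string'] == 'nnbaaa' and _check['idx_original_string'] == 3
assert reverse_bwt('nnbaaa', 3) == 'banana'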
if __name__ == "__main__":
entry_msg = 'Provide a string that I will generate its BWT transform: '
s = input(entry_msg).strip()
result = bwt_transform(s)
print(
F"""Burrows Wheeler transform for string '{s}' results """
F"""in '{result['bwt_string']}'"""
)
original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
F"""we get original string '{original_string}'"""
)
| 712
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __magic_name__ ( unittest.TestCase ):
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=1_8 , snake_case=3_0 , snake_case=4_0_0 , snake_case=True , snake_case=None , snake_case=True , ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =size if size is not None else {'height': 1_8, 'width': 1_8}
_UpperCAmelCase : Optional[Any] =parent
_UpperCAmelCase : Any =batch_size
_UpperCAmelCase : Union[str, Any] =num_channels
_UpperCAmelCase : List[str] =image_size
_UpperCAmelCase : List[Any] =min_resolution
_UpperCAmelCase : Optional[Any] =max_resolution
_UpperCAmelCase : List[Any] =do_resize
_UpperCAmelCase : Union[str, Any] =size
_UpperCAmelCase : List[Any] =apply_ocr
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int =LayoutLMvaImageProcessingTester(self)
@property
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] =self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(snake_case , 'do_resize'))
self.assertTrue(hasattr(snake_case , 'size'))
self.assertTrue(hasattr(snake_case , 'apply_ocr'))
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int =self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8})
_UpperCAmelCase : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2})
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image)
# Test not batched input
_UpperCAmelCase : Union[str, Any] =image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , snake_case)
self.assertIsInstance(encoding.boxes , snake_case)
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray)
# Test not batched input
_UpperCAmelCase : List[str] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : str =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
# Initialize image_processing
_UpperCAmelCase : Optional[int] =self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case)
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor)
# Test not batched input
_UpperCAmelCase : Optional[Any] =image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase : List[Any] =image_processing(snake_case , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
# with apply_OCR = True
_UpperCAmelCase : str =LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase : Tuple =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
_UpperCAmelCase : Tuple =Image.open(ds[0]['file']).convert('RGB')
_UpperCAmelCase : Any =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase : Dict =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_UpperCAmelCase : Optional[int] =[[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 
5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case)
self.assertListEqual(encoding.boxes , snake_case)
# with apply_OCR = False
_UpperCAmelCase : Dict =LayoutLMvaImageProcessor(apply_ocr=snake_case)
_UpperCAmelCase : Optional[int] =image_processing(snake_case , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
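# Hedged standalone usage of the processor under test (the upstream class name
# LayoutLMv3ImageProcessor is an assumption of this sketch; apply_ocr=False
# avoids the Tesseract dependency). Left commented out so importing the test
# module stays side-effect free:
#
#     from PIL import Image
#     from transformers import LayoutLMv3ImageProcessor
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=False)
#     encoding = processor(Image.new("RGB", (640, 480)), return_tensors="pt")
#     assert encoding.pixel_values.shape == (1, 3, 224, 224)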
| 331
| 0
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :int = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = PegasusTokenizer
snake_case_ = PegasusTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
__A = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def UpperCamelCase_ ( self : int ,**A : Dict ):
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : List[str] ,A : str ):
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self : Any ):
__A = "</s>"
__A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def UpperCamelCase_ ( self : List[str] ):
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"</s>" )
self.assertEqual(vocab_keys[-1] ,"v" )
self.assertEqual(len(A ) ,11_03 )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size ,11_03 )
def UpperCamelCase_ ( self : int ):
__A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__A = self.tokenizer_class.from_pretrained(self.tmpdirname )
__A = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__A = rust_tokenizer([raw_input_str] ,return_tensors=A ,add_special_tokens=A ).input_ids[0]
__A = py_tokenizer([raw_input_str] ,return_tensors=A ,add_special_tokens=A ).input_ids[0]
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__A = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__A = tokenizer([raw_input_str] ,return_tensors=A ).input_ids[0]
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Tuple ):
__A = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
__A = "To ensure a smooth flow of bank resolutions."
__A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__A = tokenizer([raw_input_str] ,return_tensors=A ).input_ids[0]
self.assertListEqual(A ,A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = ["This is going to be way too long." * 1_50, "short example"]
__A = ["not super long but more than 5 tokens", "tiny"]
__A = self._large_tokenizer(A ,padding=A ,truncation=A ,return_tensors="pt" )
__A = self._large_tokenizer(
text_target=A ,max_length=5 ,padding=A ,truncation=A ,return_tensors="pt" )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
__A = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="google/bigbird-pegasus-large-arxiv" ,revision="ba85d0851d708441f91440d509690f1ab6353415" ,)
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = PegasusTokenizer
snake_case_ = PegasusTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__A = PegasusTokenizer(A ,offset=0 ,mask_token_sent=A ,mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def UpperCamelCase_ ( self : str ,**A : Union[str, Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[int] ,A : Dict ):
return ("This is a test", "This is a test")
def UpperCamelCase_ ( self : int ):
__A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__A = self.tokenizer_class.from_pretrained(self.tmpdirname )
__A = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__A = rust_tokenizer([raw_input_str] ,return_tensors=A ,add_special_tokens=A ).input_ids[0]
__A = py_tokenizer([raw_input_str] ,return_tensors=A ,add_special_tokens=A ).input_ids[0]
self.assertListEqual(A ,A )
@require_torch
def UpperCamelCase_ ( self : Optional[Any] ):
__A = ["This is going to be way too long." * 10_00, "short example"]
__A = ["not super long but more than 5 tokens", "tiny"]
__A = self._large_tokenizer(A ,padding=A ,truncation=A ,return_tensors="pt" )
__A = self._large_tokenizer(
text_target=A ,max_length=5 ,padding=A ,truncation=A ,return_tensors="pt" )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCamelCase_ ( self : Dict ):
__A = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__A = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A ,[1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] ,)
| 55
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
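# Toy check (made-up sizes) that the layer built above is weight-tied to the
# embedding, as expected for an output projection:
_toy_emb = nn.Embedding(10, 4)
assert torch.equal(make_linear_from_emb(_toy_emb).weight, _toy_emb.weight)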
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 467
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase_ ( __snake_case ):
"""simple docstring"""
UpperCAmelCase__ = ["image_processor", "tokenizer"]
UpperCAmelCase__ = "OwlViTImageProcessor"
UpperCAmelCase__ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Dict:
__UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowercase , )
__UpperCamelCase = kwargs.pop('feature_extractor' )
__UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowercase , _lowercase )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="max_length" , _SCREAMING_SNAKE_CASE="np" , **_SCREAMING_SNAKE_CASE ) -> Dict:
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(_lowercase , _lowercase ) or (isinstance(_lowercase , _lowercase ) and not isinstance(text[0] , _lowercase )):
__UpperCamelCase = [self.tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase , **_lowercase )]
elif isinstance(_lowercase , _lowercase ) and isinstance(text[0] , _lowercase ):
__UpperCamelCase = []
# Maximum number of queries across batch
__UpperCamelCase = max([len(_lowercase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowercase ) != max_num_queries:
                    __UpperCamelCase = t + [' '] * (max_num_queries - len(_lowercase ))
__UpperCamelCase = self.tokenizer(_lowercase , padding=_lowercase , return_tensors=_lowercase , **_lowercase )
encodings.append(_lowercase )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = input_ids
__UpperCamelCase = attention_mask
if query_images is not None:
__UpperCamelCase = BatchEncoding()
__UpperCamelCase = self.image_processor(
_lowercase , return_tensors=_lowercase , **_lowercase ).pixel_values
__UpperCamelCase = query_pixel_values
if images is not None:
__UpperCamelCase = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return self.image_processor.post_process(*_lowercase , **_lowercase )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
return self.image_processor.post_process_object_detection(*_lowercase , **_lowercase )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.image_processor.post_process_image_guided_detection(*_lowercase , **_lowercase )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def __lowercase( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def __lowercase( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowercase , )
return self.image_processor_class
@property
def __lowercase( self ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowercase , )
return self.image_processor
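# Hedged end-to-end usage of a processor like the one above, via the released
# checkpoint (checkpoint name and image URL are examples only; left commented
# out so nothing is downloaded on import):
#
#     import requests
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
#     image = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg', stream=True).raw)
#     inputs = processor(text=[['a cat', 'a dog']], images=image, return_tensors='pt')
#     print(inputs.input_ids.shape, inputs.pixel_values.shape)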
| 719
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How many images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """simple docstring"""
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
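# Quick self-contained check of image_grid with solid-colour tiles:
_tiles = [Image.new('RGB', (32, 32), c) for c in ('red', 'green', 'blue', 'white')]
assert image_grid(_tiles, rows=2, cols=2).size == (64, 64)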
def generate_images(pipeline, prompt='robotic cat with wings', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """simple docstring"""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)  # disable the safety checker
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 567
| 0
|
"""simple docstring"""
def get_data(source_data: list) -> list:
    data_lists = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list, weights: list) -> list:
    score_lists = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list) -> list:
    final_scores = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
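# Worked example: column weight 0 means a lower raw value scores higher,
# weight 1 means a higher raw value scores higher. Each row gains a combined
# score; the first row scores 1.0 + 0.25 = 1.25.
_demo = procentual_proximity([[20, 60], [23, 90], [22, 50]], [0, 1])
assert _demo[0][-1] == 1.25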
| 391
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
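    # Sanity check against a closed form: the integral of x^2 over [0, 1] is
    # 1/3, so the approximation should be close to 0.3333.
    print(f"sanity check: {trapezoidal_area(lambda x: x * x, 0.0, 1.0, 1000)}")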
| 659
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A : Optional[int] = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 95
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
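
# Example invocation (illustrative only; the script filename and output path
# are placeholders for however this conversion script is saved locally):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224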
| 95
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
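
# Example invocation (illustrative; the script filename, `model.pt`, and the
# output directory are placeholders):
#   python convert_mbart_checkpoint.py model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25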
| 425
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Trial division over candidates of the form 6k +/- 1 up to sqrt(number)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield prime numbers in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
| 553
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 198
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
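
# Example invocation (illustrative; the checkpoint file and output path are
# placeholders):
#   python convert_groupvit_checkpoint.py --checkpoint_path ./groupvit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc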
| 198
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
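

# How these tests are typically run (illustrative; the path assumes the usual
# transformers repository layout and may differ):
#   python -m pytest tests/models/bit/test_modeling_bit.py -k "test_model"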
| 442
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 442
| 1
|
"""simple docstring"""
from __future__ import annotations

def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes returning all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)

    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest

if __name__ == "__main__":
print(F"{solution() = }")
| 100
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST


class DebertaV2ModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 100
| 1
|
'''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list):
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; returns False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
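

# Minimal usage sketch (added for illustration): three sets of sizes 1, 1, 2.
# Merging the first two gives a set of size 2; merging that with the third
# gives a set of size 4, which `max_set` tracks.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 2])
    ds.merge(0, 1)
    assert ds.max_set == 2
    ds.merge(1, 2)
    assert ds.max_set == 4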
| 399
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
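
# Quick illustration (a sketch; the exact values mirror a deeper MiT variant
# and are given only as an example of what can be overridden):
#
#   config = SegformerConfig(depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 320, 512])
#   assert config.model_type == "segformer"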
| 564
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
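
# Effect of the lazy module (an illustrative note): importing the package is
# cheap, and each submodule is only imported on first attribute access, e.g.
#
#   from transformers import MvpConfig  # triggers configuration_mvp lazily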
| 719
|
"""simple docstring"""
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list = None, size: int = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list) -> None:
        """Initialize the tree from an array in O(N)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        """Convert the tree back to the underlying array in O(N)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to the element at `index` in O(log N)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log N)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Query the sum of the half-open prefix [0, right) in O(log N)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Query the sum of [left, right) in O(log N)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Get the value of the element at `index` in O(log N)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Find the largest index with prefix(index) <= value in O(log N)."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
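
    # Usage sketch (added for illustration): prefix/range queries and point updates.
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    assert tree.query(1, 4) == 2 + 3 + 4
    tree.add(0, 9)  # the underlying array becomes [10, 2, 3, 4, 5]
    assert tree.get(0) == 10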
| 133
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class A__ ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ = LevitImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = LevitImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def test_batch_feature( self ):
"""simple docstring"""
pass
def test_call_pil( self ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_numpy( self ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_pytorch( self ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 280
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Tuple = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[Any] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__magic_name__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
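# Hedged miniature of the lazy-import pattern used above (_TinyLazyModule is an illustrative
# stand-in, not the real transformers._LazyModule): attribute access imports the owning
# submodule on first use instead of at package-import time.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache it so __getattr__ only fires on misses
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
# A package would then swap itself out, e.g.: sys.modules[__name__] = _TinyLazyModule(__name__, _import_structure)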
| 280
| 1
|
from math import pi, sqrt
def gamma( num ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_7_1.5:
raise OverflowError("""math range error""" )
elif num - int(num ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma():
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
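# Hedged sanity sketch: the recursion unrolls to the usual identities, e.g. gamma(5) = 4! = 24
# and gamma(2.5) telescopes to 1.5 * 0.5 * sqrt(pi) via the half-integer base case.
assert gamma(5) == 24.0
assert abs(gamma(2.5) - 1.5 * 0.5 * sqrt(pi)) < 1e-12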
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
num = float(input("Gamma of: "))
print(F"""gamma({num}) = {gamma(num)}""")
print("\nEnter 0 to exit...")
| 367
|
UpperCAmelCase_ : int = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : Optional[Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : int = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : str = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : Any = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 367
| 1
|
'''simple docstring'''
from manim import *
class lowercase_ ( Scene ):
"""simple docstring"""
def construct( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("CPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("GPU" , font_size=2_4 )
_SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("Model" , font_size=2_4 )
_SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_SCREAMING_SNAKE_CASE = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=2_4 )
_SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_SCREAMING_SNAKE_CASE = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_SCREAMING_SNAKE_CASE = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowercase_ ):
_SCREAMING_SNAKE_CASE = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 418
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext (path ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(ext ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def run_command_factory (args ) -> Any:
'''simple docstring'''
nlp = pipeline(
    task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
reader = PipelineDataFormat.from_str(
    format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(nlp , reader )
class RunCommand( BaseTransformersCLICommand ):
def __init__( self , nlp , reader ):
    self._nlp = nlp
    self._reader = reader
@staticmethod
def register_subcommand( parser ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=run_command_factory )
def run( self ):
    nlp, outputs = self._nlp, []
    for entry in self._reader:
        output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
        if isinstance(output , dict ):
            outputs.append(output )
        else:
            outputs += output
    # Saving data
    if self._nlp.binary_output:
        binary_path = self._reader.save_binary(outputs )
        logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
    else:
        self._reader.save(outputs )
| 670
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def setUp( self ):
"""simple docstring"""
lowerCamelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase__ = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(a_ , a_ )
def get_tokenizer( self , **a_ ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **a_ )
def get_image_processor( self , **a_ ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def tearDown( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def prepare_image_inputs( self ):
"""simple docstring"""
lowerCamelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCamelCase__ = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def test_save_load_pretrained_additional_features( self ):
"""simple docstring"""
lowerCamelCase__ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase__ = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
lowerCamelCase__ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def test_image_processor( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = image_processor(a_ , return_tensors="""np""" )
lowerCamelCase__ = processor(images=a_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = processor(text=a_ )
lowerCamelCase__ = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(a_ ):
processor()
def test_tokenizer_decode( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
lowerCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ = processor.batch_decode(a_ )
lowerCamelCase__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
def test_model_input_names( self ):
"""simple docstring"""
lowerCamelCase__ = self.get_image_processor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = VisionTextDualEncoderProcessor(tokenizer=a_ , image_processor=a_ )
lowerCamelCase__ = """lower newer"""
lowerCamelCase__ = self.prepare_image_inputs()
lowerCamelCase__ = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 720
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _info( self ):
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=None , )
def _split_generators( self , dl_manager , pipeline ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def _build_pcollection( self , pipeline , examples ):
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples )
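# Hedged note: `|` and `>>` are Apache Beam's composition operators here; "Load Examples" is
# the transform's label and beam.Create turns the in-memory example list into a PCollection.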
class NestedBeamDataset( datasets.BeamBasedBuilder ):
"""simple docstring"""
def _info( self ):
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=None , )
def _split_generators( self , dl_manager , pipeline ):
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def _build_pcollection( self , pipeline , examples ):
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples ():
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def get_test_nested_examples ():
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class BeamBuilderTest( TestCase ):
"""simple docstring"""
@require_beam
def test_download_and_prepare( self ):
"""simple docstring"""
lowerCamelCase__ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase__ = DummyBeamDataset(cache_dir=a_ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(a_ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
lowerCamelCase__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , a_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , a_ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def test_download_and_prepare_sharded( self ):
"""simple docstring"""
import apache_beam as beam
lowerCamelCase__ = beam.io.parquetio.WriteToParquet
lowerCamelCase__ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase__ = DummyBeamDataset(cache_dir=a_ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
lowerCamelCase__ = partial(a_ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
a_ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
a_ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
lowerCamelCase__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , a_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , a_ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def test_no_beam_options( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase__ = DummyBeamDataset(cache_dir=a_ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def test_nested_features( self ):
"""simple docstring"""
lowerCamelCase__ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowerCamelCase__ = NestedBeamDataset(cache_dir=a_ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(a_ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
lowerCamelCase__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , a_ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , a_ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 235
| 0
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
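# Hedged usage sketch: with both inputs L2-normalized, the matmul above is a cosine-similarity
# matrix of shape (num_images, num_concepts). With illustrative tensors (not real CLIP
# embeddings): cosine_distance(torch.eye(2), torch.eye(2)) -> tensor([[1., 0.], [0., 1.]]).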
class StableDiffusionSafetyChecker( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']
def __init__( self : Tuple , lowerCAmelCase : CLIPConfig ) -> Tuple:
'''simple docstring'''
super().__init__(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =CLIPVisionModel(config.vision_config )
SCREAMING_SNAKE_CASE_: Optional[Any] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Parameter(torch.ones(17 ) , requires_grad=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =nn.Parameter(torch.ones(3 ) , requires_grad=lowerCAmelCase )
@torch.no_grad()
def forward( self , clip_input , images ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.vision_model(lowerCAmelCase )[1] # pooled_output
SCREAMING_SNAKE_CASE_: Dict =self.visual_projection(lowerCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_: int =cosine_distance(lowerCAmelCase , self.special_care_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: str =cosine_distance(lowerCAmelCase , self.concept_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: Optional[int] =[]
SCREAMING_SNAKE_CASE_: Optional[Any] =image_embeds.shape[0]
for i in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] ={"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: Any =0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
SCREAMING_SNAKE_CASE_: Dict =special_cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Optional[Any] =self.special_care_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: Any =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
SCREAMING_SNAKE_CASE_: str =0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
SCREAMING_SNAKE_CASE_: int =cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Tuple =self.concept_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: str =round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase )
result.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =[len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def forward_onnx( self , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.vision_model(lowerCAmelCase )[1] # pooled_output
SCREAMING_SNAKE_CASE_: Dict =self.visual_projection(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =cosine_distance(lowerCAmelCase , self.special_care_embeds )
SCREAMING_SNAKE_CASE_: Union[str, Any] =cosine_distance(lowerCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: Any =0.0
SCREAMING_SNAKE_CASE_: Optional[int] =special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.any(special_scores > 0 , dim=1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =special_care * 0.0_1
SCREAMING_SNAKE_CASE_: Tuple =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
SCREAMING_SNAKE_CASE_: Tuple =(cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: List[str] =torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
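# Hedged note: this batched variant mirrors the per-image loop above with tensor ops; a
# special-care hit relaxes every concept threshold by 0.01 via the broadcasted
# special_adjustment before the final torch.any over the concept dimension.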
| 409
|
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =generate_datasets(
lowercase , lowercase , number=lowercase , min_len=1026 , trim=lowercase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE_: Any =torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
SCREAMING_SNAKE_CASE_: List[str] =load_gpta("""gpt2""" ).to(lowercase )
print("""computing perplexity on objective set""" )
SCREAMING_SNAKE_CASE_: Dict =compute_perplexity(lowercase , lowercase , lowercase ).item()
print("""perplexity on objective set:""" , lowercase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE_: Any =GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE_: Optional[Any] =SecondaryLearner(lowercase )
# Train secondary learner
SCREAMING_SNAKE_CASE_: Dict =train_secondary_learner(
lowercase , lowercase , max_epochs=lowercase , batch_size=lowercase , eval_freq=100 , igf_model_path=lowercase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
SCREAMING_SNAKE_CASE_: str =RandomSampler(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =DataLoader(lowercase , sampler=lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =max_steps // (len(lowercase )) + 1
SCREAMING_SNAKE_CASE_: List[str] =0
SCREAMING_SNAKE_CASE_: Tuple =torch.zeros((1, context_len) , dtype=torch.long , device=lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =recopy_model(lowercase , lowercase , lowercase )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowercase )
secondary_learner.eval()
SCREAMING_SNAKE_CASE_: Optional[int] =[]
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: str =[]
SCREAMING_SNAKE_CASE_: Dict =[]
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE_: Union[str, Any] =compute_perplexity(lowercase , lowercase , lowercase )
test_perps.append(lowercase )
print("""Test perplexity, step""" , lowercase , """:""" , lowercase )
for epoch in range(int(lowercase ) ):
for step, example in enumerate(lowercase ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_: Dict =random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE_: Any =example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE_: Dict =model(lowercase , labels=lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =secondary_learner.forward(
torch.tensor(lowercase , dtype=torch.long , device=lowercase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowercase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE_: List[str] =-1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE_: Any =False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE_: List[Any] =outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_: Optional[Any] =0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE_: List[Any] =compute_perplexity(lowercase , lowercase , lowercase )
test_perps.append(lowercase )
print("""Test perplexity, step""" , lowercase , """:""" , lowercase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowercase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main( ):
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowercase , type=lowercase , required=lowercase , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowercase , type=lowercase , required=lowercase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=lowercase , default=lowercase , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=lowercase , default=lowercase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=lowercase , type=lowercase , required=lowercase , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=lowercase , type=lowercase , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=lowercase , default=lowercase , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=lowercase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=lowercase , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=lowercase , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=lowercase , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=lowercase , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=lowercase , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=lowercase , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=lowercase , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=lowercase , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=lowercase , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=lowercase , type=lowercase , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=lowercase , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowercase , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=lowercase , type=lowercase , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=lowercase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE_: str =joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
SCREAMING_SNAKE_CASE_: List[str] =training_secondary_learner(
lowercase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE_: int =GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=lowercase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowercase , lowercase , lowercase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=lowercase , secondary_learner=lowercase , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 409
| 1
|
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
a_ :Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_( state_dict ) -> Any:
    '''simple docstring'''
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k , None )
a_ :Dict = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys( s_dict ) -> Dict:
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
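# Hedged note: assigning emb.weight.data ties the output projection to the input embeddings
# (standard weight tying), so the layer computes hidden_states @ emb.weight.T with no bias.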
def _download( url , root ) -> bytes:
    '''simple docstring'''
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('/' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , 'rb' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , 'wb' ) as output:
        with tqdm(
            total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , 'rb' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.' )
    return model_bytes
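# Hedged note: the expected SHA256 is parsed from the second-to-last URL path segment, which is
# how the checkpoint URLs in _MODELS embed their checksums; a cached file that fails the check
# is re-downloaded once and verified again before the raw bytes are returned.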
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
if ".pt" not in checkpoint_path:
SCREAMING_SNAKE_CASE__ : int = _download(_MODELS[checkpoint_path] )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(A__ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE__ : str = original_checkpoint['''dims''']
SCREAMING_SNAKE_CASE__ : Tuple = original_checkpoint['''model_state_dict''']
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(A__ )
rename_keys(A__ )
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
SCREAMING_SNAKE_CASE__ : Dict = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=A__ , decoder_ffn_dim=A__ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
SCREAMING_SNAKE_CASE__ : Tuple = WhisperForConditionalGeneration(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = model.model.load_state_dict(A__ , strict=A__ )
if len(A__ ) > 0 and not set(A__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
f""" but all the following weights are missing {missing}""" )
if tie_embeds:
SCREAMING_SNAKE_CASE__ : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = proj_out_weights
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ :List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ :List[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 250
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ) -> Any:
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))


def softmax( _outputs ) -> Any:
    '''simple docstring'''
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
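# Hedged numeric sketch of why the row max is subtracted first: np.exp(1000.0) overflows to inf,
# yet softmax(np.array([[1000.0, 999.0]])) still returns roughly [[0.7311, 0.2689]], because
# exp only ever sees the non-positive shifted logits.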
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(PIPELINE_INIT_ARGS , r'''
_UpperCAmelCase , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self : Dict , **_lowercase : Any ):
super().__init__(**_lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_kwargs
SCREAMING_SNAKE_CASE__ : Any = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
SCREAMING_SNAKE_CASE__ : str = self.model.config.return_all_scores
if isinstance(top_k , int ) or top_k is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = top_k
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _lowercase , )
if return_all_scores:
SCREAMING_SNAKE_CASE__ : Tuple = None
else:
SCREAMING_SNAKE_CASE__ : int = 1
if isinstance(function_to_apply , str ):
SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE__ : Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *args , **kwargs ):
    result = super().__call__(*args , **kwargs )
    # TODO try and retrieve it in a nicer way from _sanitize_parameters.
    _legacy = 'top_k' not in kwargs
if isinstance(args[0] , str ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def preprocess( self , inputs , **tokenizer_kwargs ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.framework
if isinstance(_lowercase , _lowercase ):
return self.tokenizer(**_lowercase , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1 and isinstance(inputs[0] , _lowercase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
def _forward( self , model_inputs ):
    return self.model(**model_inputs )
def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
function_to_apply = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
function_to_apply = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
function_to_apply = self.model.config.function_to_apply
else:
function_to_apply = ClassificationFunction.NONE
outputs = model_outputs['''logits'''][0]
outputs = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
scores = sigmoid(outputs )
elif function_to_apply == ClassificationFunction.SOFTMAX:
scores = softmax(outputs )
elif function_to_apply == ClassificationFunction.NONE:
scores = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
dict_scores = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(_lowercase )
]
if not _legacy:
dict_scores.sort(key=lambda _lowercase : x["score"] , reverse=_lowercase )
if top_k is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = dict_scores[:top_k]
return dict_scores
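
# Illustrative usage of the text-classification pipeline built around this class
# (added annotation; the model choice and output values are examples only):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("I love this movie!")           # -> [{'label': 'POSITIVE', 'score': ...}]
#   classifier("I love this movie!", top_k=2)  # scores for every label, sorted, non-legacy output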
| 250
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    # handle the primes 2 and 3 first, then step through odd candidates only
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
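
# Worked example for the 6k +/- 1 loop above (added annotation, not part of the
# original solution): to test 97, range(5, int(sqrt(97) + 1), 6) yields only
# i = 5, so the loop checks 97 % 5 and 97 % 7; neither is 0, and 97 is
# correctly reported as prime.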
if __name__ == "__main__":
print(F'{solution() = }')
| 100
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
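
    # Example of the resizing rule above (added annotation, illustrative numbers):
    # with size["shortest_edge"] == 18, a 30 (w) x 60 (h) image keeps its aspect
    # ratio and resizes to width 18, height int(18 * 60 / 30) == 36.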
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 276
| 0
|
"""Testing suite for the PyTorch SegFormer model."""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
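
    # Shape arithmetic behind the assertions above (added annotation): with
    # image_size=64 the first block attends over (64 // 4) ** 2 = 256 queries
    # against a key sequence reduced by sr_ratios[0]=8 to (64 // 32) ** 2 = 4,
    # which is SegFormer's efficient sequence-reduction self-attention.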
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 211
|
from sklearn.metrics import f1_score

import datasets

_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 211
| 1
|
"""Utilities for the distillation training scripts: git logging, GPU/distributed setup and seeding."""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info (repo id, sha and active branch) to `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """Handle single and multi-GPU / multi-node setup."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
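
# Note (added annotation): WORLD_SIZE, RANK, N_GPU_NODE, N_NODES and NODE_RANK
# are expected to be exported by the process launcher (e.g. torch.distributed.launch
# or torchrun); init_gpu_params only validates them and mirrors them onto `params`.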
def set_seed(args):
    """Set the random seed for numpy, torch and CUDA."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 523
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for the Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
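
# Added annotation: `alpha_bar` above is the squared-cosine noise schedule from
# Nichol & Dhariwal, "Improved Denoising Diffusion Probabilistic Models"; each
# beta is the fractional drop of alpha_bar over one step, clipped to `max_beta`
# so no single step destroys the signal entirely.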
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
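
# Added annotation: the velocity above is the v-prediction target from
# Salimans & Ho, "Progressive Distillation for Fast Sampling of Diffusion
# Models": v_t = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample.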
| 523
| 1
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
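
# Worked example (added annotation): infix_2_postfix("a+b*c") scans
# a -> postfix "a"; "+" -> stack "+"; b -> "ab"; "*" (higher priority than "+")
# is pushed, stack "+*"; c -> "abc"; draining the stack yields "abc*+".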
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
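
# Worked example (added annotation): infix_2_prefix("(a+b)*c") reverses the
# string and swaps parentheses to "c*(b+a)", whose postfix is "cba+*";
# reversing that gives the prefix form "*+abc".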
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 714
|
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class names were lost in this copy; the two names below
# follow transformers' `dummy_speech_objects.py` and are assumed here.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 527
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)

    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
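
# Added annotation on the memory assertion above: `enable_sequential_cpu_offload`
# keeps the pipeline's submodules on CPU and moves them to the GPU one at a time
# during the forward pass, which (together with attention slicing) is what keeps
# peak VRAM under the ~2.9 GB bound checked here.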
| 467
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
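# A worked illustration of the two chains (added for clarity, not used by the code):
#   44 -> 4^2 + 4^2 = 32 -> 3^2 + 2^2 = 13 -> 1^2 + 3^2 = 10 -> 1^2 + 0^2 = 1     (the 1-chain)
#   85 -> 8^2 + 5^2 = 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89           (the 89-loop)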
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # 1 belongs to the chain ending with 1
CHAINS[57] = False  # 58 belongs to the chain ending with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 503
| 0
|
'''simple docstring'''


def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
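# Worked examples (illustrative only, not part of the original file):
#   multiplicative_persistence(39): 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, i.e. 3 steps.
#   additive_persistence(199):      199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, i.e. 3 steps.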
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 344
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
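# Minimal usage sketch for TextIteratorStreamer outside of a test (illustrative only;
# assumes `model`, `tokenizer` and `input_ids` are set up as in the tests above):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="")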
| 344
| 1
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
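# Illustrative composition sketch (added, not part of the original file): building the
# composite config from explicit sub-configs via the classmethod defined above.
#
#   text_config = Pix2StructTextConfig()
#   vision_config = Pix2StructVisionConfig()
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)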
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 564
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50_000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
| 301
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
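# Illustrative example (added, not part of the original file): with pad_token_id=1 and
# decoder_start_token_id=0, shift_tokens_right(jnp.array([[5, 6, 7]]), 1, 0) returns
# [[0, 5, 6]] -- the sequence shifted right with the decoder start token prepended.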
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 301
| 1
|
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# NOTE: the original distinct `_1`/`_2` argument suffixes were collapsed by renaming;
# the argument order below is reconstructed from Graham's law, rate_1 / rate_2 = sqrt(M_2 / M_1).
def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
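# Illustrative check (approximate values, added, not part of the original file): hydrogen
# (M = 2.016 g/mol) effuses about four times faster than oxygen (M = 31.998 g/mol):
#   effusion_ratio(2.016, 31.998) == round(sqrt(31.998 / 2.016), 6) ~= 3.984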
| 659
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 659
| 1
|
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """simple docstring"""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
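# Illustrative call (added, not part of the original file):
#   search([0, 5, 7, 10, 15], 5) returns 1 -- both ends of the window are probed on each
#   recursive call, so [left, right] shrinks from both sides until the key (or -1) is found.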
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 514
|
'''simple docstring'''
_UpperCamelCase : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_UpperCamelCase : str = [{"type": "code", "content": INSTALL_CONTENT}]
_UpperCamelCase : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 514
| 1
|
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
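# Illustrative layout note (added, not from the original metric card): with two predictions
# that have two references each, this metric expects
#   references = [["ref_1a", "ref_1b"], ["ref_2a", "ref_2b"]]
# while raw sacrebleu expects the transposed layout that _compute builds internally:
#   transformed = [["ref_1a", "ref_2a"], ["ref_1b", "ref_2b"]]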
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
"""simple docstring"""
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 9
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 9
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 707
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 455
| 0
|
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: "np.ndarray | None" = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 650
|
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
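# Illustrative figures (approximate, added, not part of the original file): for water with
# density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa,
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1467.8 m/s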
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 650
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self) -> None:
        '''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 3_53_78, 66_61, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self) -> None:
        '''simple docstring'''
        text = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(text))
@slow
    def test_tokenizer_integration(self) -> None:
        '''simple docstring'''
        # fmt: off
a__: Dict = {'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__, model_name='xlm-roberta-base', revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3')
| 217
|
"""simple docstring"""
import math
def main() -> None:
    message = input('Enter message: ')
    key = int(input(f'Enter key [2-{len(message) - 1}]: '))
    mode = input('Encryption/Decryption [e/d]: ')
    if mode.lower().startswith('e'):
        text = encrypt_message(key, message)
    elif mode.lower().startswith('d'):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
    """Encrypt `message` with a columnar transposition using `key` columns."""
    cipher_text = [''] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by laying the ciphertext back into the grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 217
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 163
|
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
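    # A small usage sketch for binary_or as fixed above: 25 is 0b11001 and 32 is
    # 0b100000, so their bitwise OR is 0b111001 (57); the result matches bin(25 | 32).
    assert binary_or(25, 32) == "0b111001"
    assert binary_or(25, 32) == bin(25 | 32)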
| 163
| 1
|
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 714
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom block-length computation for GPT-Neo local attention, ONNX-exportable."""
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
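# A quick numeric check of the helper reconstructed above (an illustrative addition;
# requires torch): for a sequence of length 10 and window size 4 the candidate
# divisors are [1, 2, 3], the remainders of 10 are [0, 0, 1], so the valid block
# lengths are [1, 2]; the largest is 2, which splits the sequence into 5 blocks.
def _demo_get_block_length():
    import torch
    block_length, num_blocks = custom_get_block_length_and_num_blocks(torch.tensor(10), 4)
    assert int(block_length) == 2
    assert int(num_blocks) == 5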
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 698
| 0
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the dotted module path of `test_file`, e.g. `tests/models/bert/test_modeling_bert.py` -> `tests.models.bert.test_modeling_bert`."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module of `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Get all (model) tester classes in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Get all test classes in `test_file` that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Get the (model) tester class used by `test_class`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that use `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Get all (model) tester classes in `test_file` used to test `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their (model) tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that use them in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to the (model) tester classes used for them in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Make objects JSON-serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
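# A usage sketch for the helpers above (an addition). The test-file path is
# hypothetical; any existing `tests/models/<model>/test_modeling_<model>.py` in a
# transformers checkout would work the same way.
if __name__ == "__main__":
    demo_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    for test_class, tester_class in get_test_to_tester_mapping(demo_test_file).items():
        print(to_json(test_class), "->", to_json(tester_class))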
| 682
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 682
| 1
|
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    '''simple docstring'''
    stooge(arr, 0, len(arr) - 1)
    return arr
def stooge(arr: list, i: int, h: int) -> None:
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
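# A tiny non-interactive sketch (an addition) exercising stooge_sort as fixed above;
# stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71), so keep demo inputs small.
assert stooge_sort([18, 1, 0, 18, 5]) == [0, 1, 5, 18, 18]
assert stooge_sort([]) == []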
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 245
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace) -> "ConvertCommand":
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type: str , tf_checkpoint: str , pytorch_dump_output: str , config: str , finetuning_task_name: str , *args ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
__magic_name__ = self._tf_checkpoint
__magic_name__ = ''''''
else:
__magic_name__ = self._tf_checkpoint
__magic_name__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
a__ , self._config , self._pytorch_dump_output , a__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
| 245
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort `collection[:n]` in place."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int) -> None:
    """Bubble collection[index - 1] forward until the suffix is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
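# A tiny sketch (an addition) of the recursive insertion sort fixed above; the list
# is sorted in place.
_demo_numbers = [5, 3, 1, 4, 2]
rec_insertion_sort(_demo_numbers, len(_demo_numbers))
assert _demo_numbers == [1, 2, 3, 4, 5]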
if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 113
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["""input_features""", """is_longer"""]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="""htk""")
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="""slaney""", mel_scale="""slaney""")
    def to_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["""feature_extractor_type"""] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, """hann"""), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="""dB""")
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        '''simple docstring'''
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="""bilinear""", align_corners=None)  # align_corners value was garbled in the source; None (the default) is assumed
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="""constant""", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # np.float64 below is assumed for the garbled `floataa` dtype in the source
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 65
| 0
|
'''simple docstring'''
def solution(power: int = 10_00) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
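# A quick sanity sketch (an addition, not part of the original script): 2**15 is
# 32768 and 3 + 2 + 7 + 6 + 8 = 26, so solution(15) must return 26.
assert solution(15) == 26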
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
| 713
|
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b) )[2:]
    max_len = max(len(a_binary) , len(b_binary) )
    return "0b" + "".join(
        str(int('1' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len) , b_binary.zfill(max_len) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
| 0
|
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
    def get_tokenizer(self, **kwargs) -> RoFormerTokenizer:
        """simple docstring"""
        return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **kwargs)
    def get_rust_tokenizer(self, **kwargs) -> RoFormerTokenizerFast:
        """simple docstring"""
        return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **kwargs)
    def get_chinese_input_output_texts(self):
        """simple docstring"""
        input_text = '''永和服装饰品有限公司,今天天气非常好'''
        output_text = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
        return input_text, output_text
    def test_tokenizer(self) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens )
    def test_rust_tokenizer(self) -> None:
        """simple docstring"""
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens )
    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer(self) -> None:
        """simple docstring"""
        pass
    # can't train a new tokenizer via the Tokenizers lib for this model
    def test_training_new_tokenizer_with_special_tokens_change(self) -> None:
        """simple docstring"""
        pass
    def test_save_slow_from_fast_and_reload_fast(self) -> None:
        """simple docstring"""
        pass
| 507
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data["""target_names"""]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b) )
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
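    # A second usage sketch (an addition): classify the first held-out test point and
    # compare the prediction with its true species; they agree for most iris points,
    # though k-NN is not guaranteed to match every label.
    print(classifier(X_train, y_train, classes, X_test[0]), "(expected:", classes[y_test[0]], ")")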
| 507
| 1
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table = []
    for _ in range(table_size):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
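    # A worked micro-example (an addition): for target "ab" and word bank
    # ["a", "b", "ab"], the whole-word match fills table[2] with ["ab"] first, then
    # extending table[1] adds ["a", "b"], so the call returns [["ab"], ["a", "b"]].
    assert all_construct("ab", ["a", "b", "ab"]) == [["ab"], ["a", "b"]]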
| 706
|
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_snake_case : List[Any] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_snake_case : Any = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_snake_case : str = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
# Requires the metric module's top-level imports: `datasets`,
# `from sklearn.metrics import f1_score, matthews_corrcoef`, and
# `from .record_evaluation import evaluate as evaluate_record`.
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """Compute per-question macro-F1 (f1_m), answer-level F1 (f1_a) and exact match for MultiRC."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    """SuperGLUE benchmark metric."""

    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Assert the encode/decode round trip on known sample strings and token ids."""
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        """The tokenizer downloaded from the hub should both tokenize and detokenize XNLI samples losslessly."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # Overridden because BLOOM uses ALiBi positional embeddings, which place no
        # hard constraint on sequence length; the parent-class test relies on one.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
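# Illustrative usage sketch (not part of the original test file): a bare
# encode/decode round trip with the same checkpoint used in setUp above.
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    ids = tok.encode("The quick brown fox")
    print(ids, tok.decode(ids))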
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt (float based).

    >>> perfect_square(9)
    True
    >>> perfect_square(10)
    False
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search (integers only).

    >>> perfect_square_binary_search(16)
    True
    >>> perfect_square_binary_search(15)
    False
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
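    # Illustrative check (not part of the original file): the float-based and the
    # binary-search implementations above should agree; the range is arbitrary.
    for n in range(30):
        assert perfect_square(n) == perfect_square_binary_search(n), n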
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CoNLL-2003 datasets, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
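# Illustrative usage sketch (not part of the original module): reading CoNLL-style
# examples with the NER task above; "data_dir" is a hypothetical folder expected
# to contain files like data_dir/dev.txt.
if __name__ == "__main__":
    task = NER()
    examples = task.read_examples_from_file("data_dir", Split.dev)
    print(f"read {len(examples)} examples, first labels: {task.get_labels(None)[:3]}")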
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
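# Illustrative sketch (not part of the original test): stacked @patch decorators
# inject mocks bottom-up, so the innermost patch (builtins.open) becomes the
# first argument. The function name below is hypothetical.
@patch("socket.socket")
@patch("builtins.open")
def _demo_patch_order(file_mock, sock_mock):
    assert file_mock is not sock_mock  # two independent mocks, in bottom-up order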
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: log only on the main
    process unless told otherwise, or in process order when `in_order=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger`-like adapter for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
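# Illustrative usage sketch (not part of the original module): requires an
# initialized Accelerator/PartialState first, hence the guard.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process only")
    logger.info("printed on every process", main_process_only=False)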
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
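# Illustrative sketch (not part of the original init): the _LazyModule pattern in
# miniature. Submodules listed in an import structure are only imported when an
# attribute is first accessed; the class below is hypothetical and simplified.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):  # only called for attributes not yet defined
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)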
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge so their distance is 1.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(2, 0, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coded coloring: black inside the set, hue keyed to distance outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
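    # Illustrative check (not part of the original script): a point inside the
    # set keeps distance 1, a clearly divergent point does not.
    assert get_distance(0, 0, 50) == 1
    assert get_distance(2, 2, 50) < 1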
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
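# Illustrative usage sketch (not part of the original file); the model id and
# the image/mask loading are assumptions following the RePaint CelebA-HQ setup:
#
#     from diffusers import RePaintPipeline, RePaintScheduler
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
#     inpainted = output.images[0]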
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
snake_case_ = """google/pegasus-xsum"""
@cached_property
def __magic_name__ ( self : str ) -> Dict:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__ ( self : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__ ( self : Any , **__lowercase : List[str] ) -> int:
SCREAMING_SNAKE_CASE__ : Dict =self.translate_src_text(**__lowercase )
assert self.expected_text == generated_words
def __magic_name__ ( self : List[Any] , **__lowercase : Any ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] =self.tokenizer(self.src_text , **__lowercase , padding=__lowercase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__ : List[Any] =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__lowercase , )
SCREAMING_SNAKE_CASE__ : str =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__lowercase )
return generated_words
@slow
def __magic_name__ ( self : Tuple ) -> int:
self._assert_generated_batch_equal_expected()
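# Illustrative usage sketch (not part of the original test file): one-off
# summarization with the same checkpoint; weights download on first use.
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tok(["PG&E scheduled blackouts in response to forecasts for high winds."], return_tensors="tf")
    summary_ids = model.generate(batch.input_ids, num_beams=2)
    print(tok.batch_decode(summary_ids, skip_special_tokens=True))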
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first search that consumes each edge once, building an Euler traversal."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Return 1 for an Euler circuit, 2 for an Euler path, 3 for neither, plus an odd-degree node."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
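# Illustrative note (not part of the original script): on g1 above the search
# starts from the odd-degree node 5, so an expected run prints something like:
#
#     graph has a Euler path
#     [5, 4, 1, 2, 3, 1]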
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowercase__ : List[Any] = "src/transformers"
# Matches is_xxx_available()
lowercase__ : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase__ : Any = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase__ : Union[str, Any] = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase__ : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase__ : List[str] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase__ : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase__ : List[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase__ : Optional[Any] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase__ : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase__ : int = re.compile(R"^\s*try:")
# Catches a line with else:
lowercase__ : Any = re.compile(R"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """
    Check all submodules of Transformers are properly registered in the main init. If not, raises an error.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
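    # Illustrative sketch (not part of the original script): analyze_results flags
    # objects present on only one side of an init; the toy dicts below are made up.
    toy_imports = {"none": ["ConfigA", "ModelA"]}
    toy_type_hints = {"none": ["ConfigA"]}
    for err in analyze_results(toy_imports, toy_type_hints):
        print(err)  # reports ModelA in _import_structure but not in TYPE_HINT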
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Windows of len(qts) in ks
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
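# Illustrative usage sketch (not part of the original module): partitioning a toy
# parameter tree; the nesting below mimics the GPT-2 layout the rules expect.
if __name__ == "__main__":
    import numpy as np

    toy_params = {
        "transformer": {
            "wte": {"embedding": np.zeros((4, 2))},
            "ln_f": {"scale": np.ones(2), "bias": np.zeros(2)},
        }
    }
    print(set_partitions(toy_params))  # PartitionSpec for wte, None for the layer norm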
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predict the sample at the previous timestep (the SDE "predictor")."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Correct the predicted sample based on the model_output (the SDE "corrector")."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(SCREAMING_SNAKE_CASE__) * sigmas[:, None, None, None]
)
__lowerCamelCase : str = noise + original_samples
return noisy_samples
def __len__( self : Optional[int]):
return self.config.num_train_timesteps
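# --- Usage sketch (not part of the scheduler itself): a minimal predictor-corrector
# sampling loop, assuming `score_model` is any callable returning a score estimate
# for (sample, t). Illustrative only; no checkpoint is implied.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=10)
#   scheduler.set_sigmas(num_inference_steps=10)
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = score_model(sample, t)
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = score_model(sample, t)
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample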
| 652
| 1
|
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85-encoded bytes back into a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
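# Round-trip sanity check (illustrative): b85encode and b85decode are inverses,
# so encoding and then decoding returns the original string.
#   assert base85_decode(base85_encode("Hello World!")) == "Hello World!"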
| 571
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
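# Example invocation (script file name and paths are illustrative, not fixed by the code above):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base --config_path ./blip_config.json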
| 571
| 1
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'Accelerated optimizer pickling failed with {e}')
        AcceleratorState._reset_state()
| 690
|
"""simple docstring"""
def __snake_case ( UpperCamelCase__ ) -> list[int]:
"""simple docstring"""
A = [0 for i in range(len(UpperCamelCase__ ) )]
# initialize interval's left pointer and right pointer
A , A = 0, 0
for i in range(1 , len(UpperCamelCase__ ) ):
# case when current index is inside the interval
if i <= right_pointer:
A = min(right_pointer - i + 1 , z_result[i - left_pointer] )
A = min_edge
while go_next(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
A , A = i, i + z_result[i] - 1
return z_result
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
"""simple docstring"""
return i + z_result[i] < len(UpperCamelCase__ ) and s[z_result[i]] == s[i + z_result[i]]
def __snake_case ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
"""simple docstring"""
A = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
A = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(UpperCamelCase__ ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
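# Worked example (illustrative): "aba" occurs twice in "abacaba", at indices 0 and 4, so:
#   find_pattern("aba", "abacaba")  # -> 2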
| 690
| 1
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
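# Usage sketch (illustrative; assumes GLUE .tsv files exist in ./glue_data/MRPC):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")
#   print(len(dataset), dataset.get_labels())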
| 710
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
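# Direct usage sketch of the interpreter under test (mirrors the cases above):
#   state = {"x": 3}
#   result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
#   # result == 5 and state == {"x": 3, "y": 5}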
| 355
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 17
|
"""simple docstring"""
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
return numa ^ numa < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
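# Examples (illustrative): XOR of a negative and a non-negative number sets the sign bit.
#   different_signs(1, -1)   # -> True
#   different_signs(1, 1)    # -> False
#   different_signs(-1, -1)  # -> False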
| 621
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
        eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
        mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
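# Usage sketch (checkpoint name taken from the pretrained map above; requires the
# sentencepiece and jieba packages to be installed):
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("你好,世界")
#   print(tokenizer.decode(ids))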
| 484
| 0
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials with success probability `prob`."""
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''')
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('''the function is defined for non-negative integers''')
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in range of 1 - 0''')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
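# Hand check of the demo above:
#   C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375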
| 285
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none is passed in, so that
        # test_switch can exercise schedulers created via from_config.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='deis', solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
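# Minimal standalone usage of the scheduler under test (illustrative; `unet`
# stands in for any epsilon-prediction model and is assumed, not defined here):
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample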
| 285
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
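# Small worked example (illustrative): shortest 0/1-weighted path from vertex 0 to 3.
#   g = AdjacencyList(4)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 1)
#   g.add_edge(2, 3, 0)
#   g.get_shortest_path(0, 3)  # -> 1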
| 665
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # If the backend normalizer disagrees with the requested options, rebuild it
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
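# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module): sentence
# pairs are laid out as [CLS] A [SEP] B [SEP], and token type ids mark the two
# segments with 0s and 1s. "bert-base-uncased" is one of the checkpoints mapped
# above; this assumes network access to download it.
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoded = tokenizer("How are you?", "I am fine.")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])  # 0s for segment A (incl. [CLS]/first [SEP]), 1s for segment B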
| 665
| 1
|
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
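# ---------------------------------------------------------------------------
# Usage sketch (illustrative): this reader is normally reached through the
# public Dataset.from_generator API, which wraps a class like the one above.
# The generator below is a made-up example.
if __name__ == "__main__":
    from datasets import Dataset

    def squares():
        for i in range(5):
            yield {"n": i, "square": i * i}

    ds = Dataset.from_generator(squares)
    print(ds[2])  # {'n': 2, 'square': 4}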
| 347
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Recursively collect the shapes of every tensor leaf in a nested structure."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
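# Worked example (illustrative): with dims == (2, 3), flat indices enumerate the
# batch in row-major order, so flat index 4 decomposes as 4 % 3 == 1 (last dim)
# and 4 // 3 == 1 (first dim), i.e. _flat_idx_to_idx(4, (2, 3)) == (1, 1).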
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produces an ordered sequence of tensor slices that, applied to a tensor of
    shape dims, together cover every leaf in the inclusive range [start, end].
    """

    # start_edges/end_edges indicate whether, from each dimension onward, the
    # start/end index sits at the top/bottom edge of the corresponding subtree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice [flat_start, flat_end) out of the flattened batch dims of t without copying the whole tensor."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run layer over inputs in chunks along their (flattened) leading batch dimensions, trading speed for memory."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
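# ---------------------------------------------------------------------------
# Usage sketch for chunk_layer (illustrative, not part of the original module):
# a toy "layer" applied over two batch dimensions in chunks of 4 flattened rows
# at a time. Real callers pass attention/MLP modules whose intermediates are too
# large to materialize for the full batch.
def _demo_chunk_layer() -> None:
    def toy_layer(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        return a + b

    a = torch.randn(2, 8, 5)
    b = torch.randn(2, 8, 5)
    out = chunk_layer(toy_layer, {"a": a, "b": b}, chunk_size=4, no_batch_dims=2)
    assert torch.allclose(out, a + b)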
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # No cached data yet: the first call always tunes
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
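# ---------------------------------------------------------------------------
# Usage sketch (illustrative): the tuner probes power-of-two chunk sizes with a
# binary search, keeping the largest one that does not raise a RuntimeError
# (e.g. a CUDA OOM). Here a chunk size above 64 stands in for an out-of-memory
# failure, so the tuner settles on 64.
def _demo_chunk_size_tuner() -> None:
    def probe(x: torch.Tensor, chunk_size: int) -> None:
        if chunk_size > 64:
            raise RuntimeError("simulated OOM")

    tuner = ChunkSizeTuner(max_chunk_size=512)
    best = tuner.tune_chunk_size(probe, (torch.ones(4),), min_chunk_size=1)
    assert best == 64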
| 347
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
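# Illustrative note (not part of the module): with the _LazyModule wiring above,
# importing the package is cheap, and heavy submodules are only imported when an
# attribute is first touched, e.g.:
#
#   import transformers.models.bloom as bloom  # fast, no torch model code loaded
#   model_cls = bloom.BloomForCausalLM         # triggers import of modeling_bloom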
| 718
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 659
| 0
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
# test_resize_embeddings = False
__UpperCAmelCase = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 639
|
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an int of
    binary digits.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-AC")
    -10101100
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
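# Worked example (illustrative): 0xAB is 171 decimal; the (n % 2, n >>= 1) loop
# peels off its bits to give the digit string "10101011", returned as the int
# 10101011. A leading "-" is carried through, so hex_to_bin("-c") == -1100.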
| 639
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 721
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about a request."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'''; torch/{_torch_version}'''
if is_flax_available():
ua += F'''; jax/{_jax_version}'''
ua += F'''; flax/{_flax_version}'''
if is_onnx_available():
ua += F'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_snake_case , _snake_case ):
ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(_snake_case , _snake_case ):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.' )

    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
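# Worked example (illustrative): a resolved file inside the Hub cache looks like
#   .../models--org--name/snapshots/<40-char-commit-hash>/config.json
# and the regex above extracts the <40-char-commit-hash> component; paths without
# a "snapshots/<hash>/" segment yield None.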
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)

    return weights_name
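# Worked example (illustrative): the variant is spliced in before the extension,
# so _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin", while variant=None leaves the name untouched.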
def _snake_case ( _snake_case : Tuple , *,
_snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : Any , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : List[str]=None , ) -> int:
'''simple docstring'''
_A = str(_snake_case )
if os.path.isfile(_snake_case ):
return pretrained_model_name_or_path
elif os.path.isdir(_snake_case ):
if os.path.isfile(os.path.join(_snake_case , _snake_case ) ):
# Load from a PyTorch checkpoint
_A = os.path.join(_snake_case , _snake_case )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(_snake_case , _snake_case , _snake_case ) ):
_A = os.path.join(_snake_case , _snake_case , _snake_case )
return model_file
else:
raise EnvironmentError(
F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(_snake_case ).base_version ) >= version.parse('0.20.0' )
):
try:
_A = hf_hub_download(
_snake_case , filename=_add_variant(_snake_case , _snake_case ) , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , )
warnings.warn(
F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , _snake_case , )
return model_file
except: # noqa: E722
warnings.warn(
F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_snake_case , _snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_snake_case , _snake_case )}\' so that the correct variant file can be added.''' , _snake_case , )
try:
# 2. Load model file as usual
_A = hf_hub_download(
_snake_case , filename=_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
F''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
F'''containing a file named {weights_name}''' )
| 505
| 0
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform 10 swaps to match the list length used in main(); after n swaps
    # a list of length n is known to be sorted. We *could* stop early if we are
    # sorted already, but it takes as long to find out we are sorted as it does
    # to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("""Initial List""")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("""Sorted List\n""")
    print(*arr)
if __name__ == "__main__":
main()
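# ---------------------------------------------------------------------------
# For comparison, a minimal single-process sketch of the same odd-even
# transposition idea (illustrative, not part of the original script):
# alternating passes compare-and-swap even/odd adjacent pairs, and n passes
# suffice for a list of length n.
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for j in range(phase % 2, n - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr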
| 518
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
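# ---------------------------------------------------------------------------
# Illustrative follow-up (assumes the upload above has happened): the tiny
# checkpoint loads and generates exactly like a full model, only with
# meaningless weights, which is all the test machinery needs.
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   out = model.generate(**tok(["hello"], return_tensors="pt"))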
| 518
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    This wraps the CLIP tokenizer with torchvision-based image transforms so that
    gradients can flow through the image preprocessing step.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = None
__lowerCamelCase = device if device else get_device()
if vqgan:
__lowerCamelCase = vqgan
else:
__lowerCamelCase = load_vqgan(self.device , conf_path=__UpperCAmelCase , ckpt_path=__UpperCAmelCase )
self.vqgan.eval()
if clip:
__lowerCamelCase = clip
else:
__lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__lowerCamelCase = ProcessorGradientFlow(device=self.device )
__lowerCamelCase = iterations
__lowerCamelCase = lr
__lowerCamelCase = log
__lowerCamelCase = make_grid
__lowerCamelCase = return_val
__lowerCamelCase = quantize
__lowerCamelCase = self.vqgan.decoder.z_shape
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = []
if output_path is None:
__lowerCamelCase = '''./animation.gif'''
if input_path is None:
__lowerCamelCase = self.save_path
__lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(__UpperCAmelCase ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(__UpperCAmelCase ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__lowerCamelCase = total_duration / len(__UpperCAmelCase )
__lowerCamelCase = [frame_duration] * len(__UpperCAmelCase )
if extend_frames:
__lowerCamelCase = 1.5
__lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(__UpperCAmelCase ) )
imageio.mimsave(__UpperCAmelCase , __UpperCAmelCase , duration=__UpperCAmelCase )
print(F"""gif saved to {output_path}""" )
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__lowerCamelCase = preprocess(Image.open(__UpperCAmelCase ) , target_image_size=256 ).to(self.device )
__lowerCamelCase = preprocess_vqgan(__UpperCAmelCase )
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.encode(__UpperCAmelCase )
return z
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.latent.detach().requires_grad_()
__lowerCamelCase = base_latent + transform_vector
if self.quantize:
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.quantize(__UpperCAmelCase )
else:
__lowerCamelCase = trans_latent
return self.vqgan.decode(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = self.clip_preprocessor(text=__UpperCAmelCase , images=__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase )
__lowerCamelCase = self.clip(**__UpperCAmelCase )
__lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
__lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , __UpperCAmelCase , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , __UpperCAmelCase , weights=neg_prompts['''weights'''] )
else:
__lowerCamelCase = torch.tensor([1] , device=self.device )
__lowerCamelCase = -torch.log(__UpperCAmelCase ) + torch.log(__UpperCAmelCase )
return loss
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = torch.randn_like(self.latent , requires_grad=__UpperCAmelCase , device=self.device )
__lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowerCamelCase = self._add_vector(__UpperCAmelCase )
__lowerCamelCase = loop_post_process(__UpperCAmelCase )
__lowerCamelCase = self._get_CLIP_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
print('''CLIP loss''' , __UpperCAmelCase )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=__UpperCAmelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
wandb.init(reinit=__UpperCAmelCase , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__lowerCamelCase = Image.open(__UpperCAmelCase )
__lowerCamelCase = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(__UpperCAmelCase ) )
    def process_prompts(self, prompts):
        '''Normalize prompts given as a "|"-separated string, "text:weight" strings, or (text, weight) pairs.'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split('|')]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(':')
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
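
    # Example (hypothetical input): process_prompts('a smiling face:2 | glasses')
    # returns {'prompts': ['a smiling face', 'glasses'], 'weights': tensor([2., 1.])}.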
    def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None):
        '''Run the full optimization loop, optionally showing and saving intermediate and final images.'''
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts']))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print('Original Image')
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png"))
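
# Minimal driver sketch. The editor class's name and constructor sit outside this
# excerpt, so both are assumed here for illustration only:
#
#   editor = VQGANCLIPEditor(iterations=10, lr=0.1)  # hypothetical name/signature
#   editor.generate(pos_prompts='a smiling face', neg_prompts='glasses', image_path='face.png')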
| 705
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '[PAD]'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '[PAD]')
        self.assertEqual(vocab_keys[1], '[CLS]')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__lowerCamelCase = {'''input_ids''': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 622
| 0
|
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("""T""")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
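
# Minimal illustration (assumed helper, not part of the aliases above): a loader
# annotated with PathLike accepts str, bytes, or os.PathLike values alike.
def read_text(path: PathLike) -> str:
    with open(path, encoding="utf-8") as f:
        return f.read()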
| 79
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowerCamelCase =logging.get_logger(__name__)
class A__ :
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, """question_encoder_tokenizer""")
        generator_path = os.path.join(save_directory, """generator_tokenizer""")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("""config""", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="""question_encoder_tokenizer""")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="""generator_tokenizer""")
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts, tgt_texts=None, max_length=None, max_target_length=None, padding="longest", return_tensors=None, truncation=True, **kwargs):
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""", FutureWarning, )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
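
# Round-trip sketch (hypothetical checkpoint name; this class mirrors transformers'
# RagTokenizer, which wraps a question-encoder tokenizer and a generator tokenizer):
#
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   batch = tok(["who wrote hamlet?"], return_tensors="pt")
#   tok.save_pretrained("/tmp/rag-tok")  # writes question_encoder_tokenizer/ and generator_tokenizer/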
| 681
| 0
|
'''simple docstring'''
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)
    # Validates string and returns dict
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(" ", "")  # the original discarded this result, leaving spaces in place
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl
    # Create the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
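
# For example, _plugboard("ABCD") returns the symmetric mapping
# {'A': 'B', 'B': 'A', 'C': 'D', 'D': 'C'}, so the substitution undoes itself.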
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
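    # The machine is self-inverse: the same rotor/plugboard settings both encrypt
    # and decrypt, so the second call recovers the original message (uppercased).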
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias so it matches the layout expected by transformers."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
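
# Sanity sketch (toy sizes, assumed here): a v2.0-style fused QKV weight keeps its
# outer shape while its rows are regrouped from (heads, splits, ...) to (splits, heads, ...).
#
#   w = torch.arange(3 * 2 * 4 * 5, dtype=torch.float32).view(3 * 2 * 4, 5)
#   w2 = fix_query_key_value_ordering(w, 2.0, 3, 2, 4)  # num_splits=3, num_heads=2, hidden_size=4
#   assert w2.shape == w.shape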
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Map a Megatron-LM GPT-2 state dict onto the transformers GPT2LMHeadModel layout."""
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get('args', None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict['checkpoint_version']
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict['model']
    # The language model.
    lm = model['language_model']
    # The embeddings.
    embeddings = lm['embedding']
    # The word embeddings.
    word_embeddings = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict['transformer.wte.weight'] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict['transformer.wpe.weight'] = pos_embeddings
    # The transformer.
    transformer = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
    # The regex to extract layer names.
    layer_re = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)')
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = F"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm'):
            ln_name = 'ln_1' if op_name.startswith('input') else 'ln_2'
            output_state_dict[layer_name + '.' + ln_name + '.' + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + '.attn.bias'] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + '.attn.masked_bias'] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + '.attn.c_attn.weight'] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + '.attn.c_attn.bias'] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'weight'] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'bias'] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict['transformer.ln_f.weight'] = transformer['final_layernorm.weight']
    output_state_dict['transformer.ln_f.bias'] = transformer['final_layernorm.bias']
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict['lm_head.weight'] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    """Parse CLI arguments, convert the Megatron checkpoint, and save the HF artifacts."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure', action='store_true')
    parser.add_argument(
        'path_to_checkpoint', type=str, help='Path to the checkpoint file (.zip archive or direct .pt file)', )
    parser.add_argument(
        '--config_file', default='', type=str, help='An optional config json file describing the pre-trained model.', )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith('.zip'):
        with zipfile.ZipFile(args.path_to_checkpoint, 'r') as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt') as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location='cpu')
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location='cpu')
    ds_args = input_state_dict.get('args', None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ['GPT2LMHeadModel']
    # Convert.
    print('Converting')
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = 'gpt2'
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config')
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(F"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, 'pytorch_model.bin')
    print(F"Saving checkpoint to \"{output_checkpoint_file}\"")
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
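
# Example invocation (hypothetical checkpoint path), assuming Megatron-LM is on
# PYTHONPATH as described in the header note:
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron_lm_345m_v0.0.zip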
| 93
|
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask the loss on padding tokens
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 277
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 415
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __A :
@staticmethod
def _lowercase (*__a : Any , **__a : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class __A ( unittest.TestCase ):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output), [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ], )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
            ]
            * 5, )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
            ]
            * 5, )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
| 415
| 1
|
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    """Parse a truthy/falsy CLI string into a bool (argparse helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''')
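
# Typically this is wired into argparse so "--class_cond false" parses to a real
# bool (illustrative; the main block below applies strabool manually instead):
#
#   parser.add_argument("--class_cond", type=strabool, default=True)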
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint['''time_embed.0.weight''']
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint['''time_embed.0.bias''']
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint['''time_embed.2.weight''']
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint['''time_embed.2.bias''']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint['''label_emb.weight''']
    new_checkpoint["conv_in.weight"] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint["conv_in.bias"] = checkpoint['''input_blocks.0.0.bias''']
    down_block_types = unet_config['''down_block_types''']
    layers_per_block = unet_config['''layers_per_block''']
    attention_head_dim = unet_config['''attention_head_dim''']
    channels_list = unet_config['''block_out_channels''']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = '''mid_block.resnets.0'''
    old_prefix = '''middle_block.0'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = '''mid_block.attentions.0'''
    old_prefix = '''middle_block.1'''
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = '''mid_block.resnets.1'''
    old_prefix = '''middle_block.2'''
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config['''up_block_types''']
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint['''out.0.weight''']
    new_checkpoint["conv_norm_out.bias"] = checkpoint['''out.0.bias''']
    new_checkpoint["conv_out.weight"] = checkpoint['''out.2.weight''']
    new_checkpoint["conv_out.bias"] = checkpoint['''out.2.bias''']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
    )
    parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F"""Checkpoint: {ckpt_name}""")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 521
|
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place by repeatedly bubbling in both directions.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 450
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
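# Minimal usage sketch (illustrative values; assumes only the class above):
# config = BitConfig(layer_type="bottleneck", out_features=["stage1", "stage4"])
# config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4'] with the default 4-stage depths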
| 701
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
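# The keys of _import_structure name submodules of this package; the _LazyModule installed at the
# bottom of the file resolves them on first attribute access, so importing the package stays cheap
# until one of these classes is actually used.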
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 541
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 672
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__lowercase :List[Any] = "bart"
__lowercase :str = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
SCREAMING_SNAKE_CASE__ : Dict = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
SCREAMING_SNAKE_CASE__ : Tuple = qar_model.eval()
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
SCREAMING_SNAKE_CASE__ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
SCREAMING_SNAKE_CASE__ : int = sas_model.eval()
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int="wiki40b" , _lowerCamelCase : int="dense" , _lowerCamelCase : List[str]=10 ):
'''simple docstring'''
if source == "none":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="english_wiki40b_snippets_100w" , n_results=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : str = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
SCREAMING_SNAKE_CASE__ : str = "question: {} context: {}".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024, device="cuda:0", )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__lowercase :Tuple = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__lowercase :int = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase :Union[str, Any] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
__lowercase :int = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__lowercase :Dict = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__lowercase :Tuple = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__lowercase :List[str] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__lowercase :Dict = "wiki40b"
__lowercase :Any = "dense"
__lowercase :List[str] = "beam"
__lowercase :List[str] = 2
__lowercase :Union[str, Any] = 64
__lowercase :Tuple = 256
__lowercase :Optional[Any] = None
__lowercase :Optional[Any] = None
__lowercase :Dict = st.sidebar.checkbox("Generation options")
if generate_options:
__lowercase :Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__lowercase :int = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__lowercase :Any = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowercase :str = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase :Dict = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase :List[str] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__lowercase :Union[str, Any] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__lowercase :List[str] = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__lowercase :Union[str, Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__lowercase :List[Any] = res[1].strip()
if sec_titles == "":
__lowercase :List[str] = "[{}]({})".format(res[0], wiki_url)
else:
__lowercase :List[str] = sec_titles.split(" & ")
__lowercase :Dict = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__lowercase :str = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
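# Usage sketch (assumes "tokenizers" appears in dependency_versions_table.deps):
# dep_version_check("tokenizers")  # raises if the installed version violates the pinned range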
| 26
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 618
|
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
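# Quick sanity check (illustrative values, not from the original module):
# circle_sort([6, 1, 5, 2]) returns [1, 2, 5, 6]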
if __name__ == "__main__":
lowerCAmelCase__ :Tuple = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ :List[str] = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 618
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize: values below 0.5 become 0, everything else becomes 1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
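# Minimal usage sketch (hypothetical checkpoint id, shown for illustration only;
# RePaint takes an image plus a mask whose white region marks the pixels to keep):
# scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
# unet = UNetaDModel.from_pretrained("google/ddpm-ema-celebahq-256")
# pipe = RePaintPipeline(unet=unet, scheduler=scheduler)
# out = pipe(image=original_image, mask_image=mask, num_inference_steps=250).images[0]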
| 616
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 616
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
UpperCamelCase_ = ["""small""", """medium""", """large"""]
UpperCamelCase_ = """lm_head.decoder.weight"""
UpperCamelCase_ = """lm_head.weight"""
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
lowercase : List[str] =torch.load(__magic_name__ )
lowercase : List[str] =d.pop(__magic_name__ )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
torch.save(__magic_name__ , os.path.join(__magic_name__ , __magic_name__ ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
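# Example invocation (hypothetical script name, assuming the {size}_ft.pkl checkpoints
# sit in the current directory):
#   python convert_dialogpt_checkpoint.py --dialogpt_path .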
| 92
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 657
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # stretch the sigmoid gate scores to (l, r) and clamp back to [0, 1] (hard-concrete style gate)
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}")
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
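# Example invocation (hypothetical paths, for illustration only):
#   python bertarize.py --pruning_method topK --threshold 0.10 --model_name_or_path ./fine_pruned_model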
| 711
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
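# Minimal usage sketch (illustrative values; assumes only the classes above):
# config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
# config.model_type  # "bert"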
| 80
| 0
|
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one binary input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the 2-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
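# The tuple-count trick generalizes to n inputs: int(any(bits)) computes the same OR.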
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 87
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]), ) )
| 456
| 0
|
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # build the 2x3 affine matrix that maps the pt1 triangle onto pt2 and warp the image with it
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 396
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 396
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
_lowerCAmelCase : List[str] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_lowerCAmelCase : Optional[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
_lowerCAmelCase : Dict = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
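
# Minimal cross-check of the binary-case formula against scikit-learn. This is
# an illustrative sketch, not part of the metric above (which delegates
# entirely to sklearn's matthews_corrcoef):
#
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
if __name__ == "__main__":
    from math import sqrt

    y_true = [1, 1, 1, 0, 0, 0, 1, 0]
    y_pred = [1, 0, 1, 0, 0, 1, 1, 0]
    tp = sum(t == 1 and p == 1 for t, p in zip(y_true, y_pred))  # 3
    tn = sum(t == 0 and p == 0 for t, p in zip(y_true, y_pred))  # 3
    fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))  # 1
    fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))  # 1
    mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    assert abs(mcc - matthews_corrcoef(y_true, y_pred)) < 1e-9  # both give 0.5
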
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    r"""Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
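
# Usage sketch (illustrative, not part of the original file): assumes the
# google/pegasus-xsum checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP is
# reachable via the Hub or a local cache:
#
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("PEGASUS pretrains with gap-sentence generation.")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))
#   print(tokenizer.decode(ids, skip_special_tokens=True))
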
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n using trial division.

    >>> solution(13195)
    29
    >>> solution(17)
    17
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i  # i is prime here: every smaller factor was already divided out
            n //= i
        i += 1
    if n > 1:
        max_number = n  # the remaining cofactor above sqrt(original n) is itself prime
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Download/caching configuration; field names follow the shape of
    `datasets.DownloadConfig`, which this dataclass mirrors."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
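
# Usage sketch (illustrative; relies on the reconstructed field names above,
# which mirror datasets.DownloadConfig):
#
#   config = DownloadConfig(cache_dir="/tmp/hf-cache", max_retries=3)
#   clone = config.copy()
#   assert clone.max_retries == 3 and clone is not config
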