code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for the given location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
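    # Because fetch_jobs returns a generator, postings stream lazily. A minimal
    # sketch (assuming Indeed's markup above still matches): take only the first
    # three results without parsing the rest of the page output.
    from itertools import islice

    for job_title, company_name in islice(fetch_jobs("Delhi"), 3):
        print(f"{job_title} - {company_name}")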
| 310 |
import math


def proth(number: int) -> int:
    """Return the nth Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After the first two terms, Proth numbers are generated in doubling
        # "blocks": block n adds 2**(n + 1) to the previously generated terms.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
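    # Sanity check against the known start of the sequence (OEIS A080075):
    # the first ten Proth numbers are 3, 5, 9, 13, 17, 25, 33, 41, 49, 57.
    assert [proth(n) for n in range(1, 11)] == [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]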
| 310 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
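    # Worked example: a 35 mH inductor driven at 1 kHz has reactance
    # X_L = 2*pi*f*L = 2*pi*1000*0.035 ≈ 219.91 ohm; pass 0 for the unknown value.
    print(ind_reactance(inductance=35e-3, frequency=1e3, reactance=0))
    # -> {'reactance': 219.9114857512855}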
| 368 |
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
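# Example invocation (sketch; all paths are hypothetical placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa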
| 257 | 0 |
def solution(n: int = 1000) -> int:
    """
    Project Euler Problem 9: there exists exactly one Pythagorean triplet
    (a, b, c) with a + b + c = 1000; return the product a * b * c.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
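    # Smaller worked case: for perimeter n = 12 the only triplet is (3, 4, 5),
    # giving 3 * 4 * 5 = 60; for the Euler statement n = 1000 the triplet is
    # (200, 375, 425) and the answer is 31875000.
    assert solution(12) == 60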
| 90 |
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> int:
    """Sort key: the frequency component of a (frequency, letters) pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in the message."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # Ties are broken by reverse ETAOIN position.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score 0-12: how well the message's letter frequencies match English."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
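    # Worked example: in "Hello World" the letter L occurs three times and O
    # twice, so they lead the frequency order; since only O lands in the top
    # six of ETAOIN, the English match score is 1.
    print(get_frequency_order("Hello World"))  # -> LOWDRHEZQXJKVBPYGFMUCSNIAT
    print(english_freq_match_score("Hello World"))  # -> 1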
| 90 | 1 |
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)

else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 352 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
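# With this lazy layout, importing the package is cheap: configuration classes
# resolve from `configuration_bigbird_pegasus` immediately, while the heavy
# torch-backed model classes are only imported from `modeling_bigbird_pegasus`
# on first attribute access, e.g.
#   from transformers import BigBirdPegasusForConditionalGeneration  # triggers the real import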
| 113 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature extractor configuration from a pretrained model or local directory."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate one of the feature extractor classes of the library from a pretrained model."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
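# Usage sketch: any checkpoint whose model type appears in the mapping above
# resolves automatically; e.g. "facebook/wav2vec2-base-960h" is a wav2vec2
# checkpoint, so this returns a Wav2Vec2FeatureExtractor instance.
#
#   from transformers import AutoFeatureExtractor
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")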
| 75 |
"""OpenAI GPT model fine-tuning script on the RocStories dataset."""

import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
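# Example invocation (sketch; the dataset paths are hypothetical placeholders —
# the CSVs must follow the ROCStories cloze-test format expected by
# load_rocstories_dataset):
#   python run_openai_gpt.py \
#       --model_name openai-gpt \
#       --do_train --do_eval \
#       --train_dataset ./data/rocstories_train.csv \
#       --eval_dataset ./data/rocstories_eval.csv \
#       --output_dir ./gpt-rocstories \
#       --train_batch_size 16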
| 75 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = UNetaDModel(
sample_size=(32, 64) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') ,)
return model
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') ,cross_attention_dim=10 ,)
return model
@property
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE :List[str] = AutoencoderKL(
sample_size=(1_28, 64) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') ,up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') ,)
__SCREAMING_SNAKE_CASE :Union[str, Any] = UNetaDModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') ,up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') ,)
return vqvae, unet
@slow
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE :List[str] = Mel(
x_res=self.dummy_unet.config.sample_size[1] ,y_res=self.dummy_unet.config.sample_size[0] ,)
__SCREAMING_SNAKE_CASE :Tuple = DDPMScheduler()
__SCREAMING_SNAKE_CASE :Tuple = AudioDiffusionPipeline(vqvae=SCREAMING_SNAKE_CASE__ ,unet=self.dummy_unet ,mel=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(42 )
__SCREAMING_SNAKE_CASE :Any = pipe(generator=SCREAMING_SNAKE_CASE__ ,steps=4 )
__SCREAMING_SNAKE_CASE :Tuple = output.audios[0]
__SCREAMING_SNAKE_CASE :int = output.images[0]
__SCREAMING_SNAKE_CASE :Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(42 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE__ ,steps=4 ,return_dict=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__SCREAMING_SNAKE_CASE :Any = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:10]
__SCREAMING_SNAKE_CASE :Any = np.frombuffer(image_from_tuple.tobytes() ,dtype='''uint8''' )[:10]
__SCREAMING_SNAKE_CASE :Optional[Any] = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__SCREAMING_SNAKE_CASE :Dict = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] ,)
__SCREAMING_SNAKE_CASE :Optional[int] = DDIMScheduler()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.dummy_vqvae_and_unet
__SCREAMING_SNAKE_CASE :List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=dummy_vqvae_and_unet[1] ,mel=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
np.random.seed(0 )
__SCREAMING_SNAKE_CASE :Any = np.random.uniform(-1 ,1 ,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__SCREAMING_SNAKE_CASE :Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(42 )
__SCREAMING_SNAKE_CASE :List[str] = pipe(raw_audio=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,start_step=5 ,steps=10 )
__SCREAMING_SNAKE_CASE :Dict = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__SCREAMING_SNAKE_CASE :int = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:10]
__SCREAMING_SNAKE_CASE :List[Any] = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.dummy_unet_condition
__SCREAMING_SNAKE_CASE :List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=SCREAMING_SNAKE_CASE__ ,mel=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
np.random.seed(0 )
__SCREAMING_SNAKE_CASE :Optional[Any] = torch.rand((1, 1, 10) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE__ ,encoding=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = output.images[0]
__SCREAMING_SNAKE_CASE :List[str] = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:10]
__SCREAMING_SNAKE_CASE :Any = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = torch_device
__SCREAMING_SNAKE_CASE :str = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
__SCREAMING_SNAKE_CASE :List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(42 )
__SCREAMING_SNAKE_CASE :str = pipe(generator=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = output.audios[0]
__SCREAMING_SNAKE_CASE :List[str] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__SCREAMING_SNAKE_CASE :int = np.frombuffer(image.tobytes() ,dtype='''uint8''' )[:10]
__SCREAMING_SNAKE_CASE :Any = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 | 239 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@require_torch
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = pipeline(
task='''zero-shot-audio-classification''' ,model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
__SCREAMING_SNAKE_CASE :Any = load_dataset('''ashraq/esc50''' )
__SCREAMING_SNAKE_CASE :int = dataset['''train''']['''audio'''][-1]['''array''']
__SCREAMING_SNAKE_CASE :Dict = audio_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] ,)
@unittest.skip('''No models are available in TF''' )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
@slow
@require_torch
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = pipeline(
task='''zero-shot-audio-classification''' ,model='''laion/clap-htsat-unfused''' ,)
# This is an audio of a dog
__SCREAMING_SNAKE_CASE :List[Any] = load_dataset('''ashraq/esc50''' )
__SCREAMING_SNAKE_CASE :Tuple = dataset['''train''']['''audio'''][-1]['''array''']
__SCREAMING_SNAKE_CASE :str = audio_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] ,)
__SCREAMING_SNAKE_CASE :Dict = audio_classifier([audio] * 5 ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 ,)
__SCREAMING_SNAKE_CASE :Union[str, Any] = audio_classifier(
[audio] * 5 ,candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ,batch_size=5 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) ,[
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 ,)
@unittest.skip('''No models are available in TF''' )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass | 239 | 1 |
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # never pop past a "(": it only leaves the stack via a matching ")"
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
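    # Worked example: infix_2_prefix("a+b*c") prints the conversion trace and
    # returns "+a*bc" — the higher-precedence "*" binds b and c before "+" applies.
    assert infix_2_prefix("a+b*c") == "+a*bc"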
| 274 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluate the functional correctness of a candidate program by running it in
    a separate process with a wall-clock time limit.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disable destructive functions so the executed program cannot interfere with
    the test (e.g. fork bomb, killing other processes, removing files).

    WARNING: this is NOT a security sandbox; untrusted model-generated code
    should not be blindly executed outside of a real sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
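# Minimal usage sketch (the program string below is a hypothetical example):
# run one candidate program with a 3-second wall-clock budget and inspect the
# verdict dictionary returned by check_correctness.
if __name__ == "__main__":
    program = "def add(a, b):\n    return a + b\n\nassert add(2, 3) == 5\n"
    print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
    # e.g. {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}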
| 274 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
"""simple docstring"""
def __init__( self : str , __magic_name__ : Dict , __magic_name__ : str=13 , __magic_name__ : Dict=[30, 30] , __magic_name__ : Any=2 , __magic_name__ : Optional[int]=3 , __magic_name__ : Dict=True , __magic_name__ : Tuple=True , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Optional[Any]=5 , __magic_name__ : List[str]=4 , __magic_name__ : Any=37 , __magic_name__ : Union[str, Any]="gelu" , __magic_name__ : Tuple=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=10 , __magic_name__ : str=0.02 , __magic_name__ : int=3 , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=8 , __magic_name__ : int=10 , ) -> Any:
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = n_targets
SCREAMING_SNAKE_CASE_ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
SCREAMING_SNAKE_CASE_ = num_patches + 1 + self.num_detection_tokens
def __A ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
SCREAMING_SNAKE_CASE_ = []
for i in range(self.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__magic_name__ )
SCREAMING_SNAKE_CASE_ = torch.rand(self.n_targets , 4 , device=__magic_name__ )
labels.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def __A ( self : List[Any] ) -> List[Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = YolosModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __A ( self : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = YolosForObjectDetection(__magic_name__ )
model.to(__magic_name__ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ )
SCREAMING_SNAKE_CASE_ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
SCREAMING_SNAKE_CASE_ = model(pixel_values=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __A ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def __A ( self : int , __magic_name__ : List[str] , __magic_name__ : List[Any] , __magic_name__ : str=False ) -> int:
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
SCREAMING_SNAKE_CASE_ = []
for i in range(self.model_tester.batch_size ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = torch.ones(
size=(self.model_tester.n_targets,) , device=__magic_name__ , dtype=torch.long )
SCREAMING_SNAKE_CASE_ = torch.ones(
self.model_tester.n_targets , 4 , device=__magic_name__ , dtype=torch.float )
labels.append(__magic_name__ )
SCREAMING_SNAKE_CASE_ = labels
return inputs_dict
def __A ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = YolosModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def __A ( self : Dict ) -> str:
self.config_tester.run_common_tests()
def __A ( self : str ) -> Tuple:
# YOLOS does not use inputs_embeds
pass
def __A ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def __A ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def __A ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __A ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
# in YOLOS, the seq_len is different
SCREAMING_SNAKE_CASE_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE_ = len(__magic_name__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
@slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=torch_device, )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]])[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 305 | import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 305 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    '''simple docstring'''
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='scaled_linear', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
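# Example invocation (hypothetical paths -- substitute your own checkpoint and
# OmegaConf YAML; the flag names come from the parser above):
#   python convert_original_ldm.py \
#       --checkpoint_path ldm_model.ckpt \
#       --config_path ldm_config.yaml \
#       --output_path ./ldm-pipeline
# The saved directory can then be reloaded with
# `LDMPipeline.from_pretrained("./ldm-pipeline")`.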
| 30 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 2_0_1,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape(self):
        """simple docstring"""
        num_inference_steps = 1_0
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        """simple docstring"""
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3
    def test_full_loop_no_noise_multistep(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_0_6, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3
    def test_custom_timesteps_increasing_order(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [3_9, 3_0, 1_2, 1_5, 0]
        with self.assertRaises(ValueError, msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [3_9, 3_0, 1_2, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 136 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
    factors = prime_factors(__SCREAMING_SNAKE_CASE)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
return 0
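# Illustrative sanity check against the first few values of the classical
# Möbius function (mu(1)=1, mu(2)=-1, mu(3)=-1, mu(4)=0, mu(5)=-1, mu(6)=1).
# It assumes `prime_factors` returns factors with multiplicity and that
# `is_square_free` accepts that list; `_demo` is not part of the original file.
def _demo() -> None:
    expected = {1: 1, 2: -1, 3: -1, 4: 0, 5: -1, 6: 1}
    for n, mu in expected.items():
        assert snake_case_(n) == mu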
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_a = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """albert"""
    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 61 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    """simple docstring"""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """simple docstring"""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, type, length):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, type, length, batch_size):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """simple docstring"""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
print('first set of iterations' )
for func, kwargs in functions:
            print(func.__name__ , str(kwargs) )
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset , **kwargs)
print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs) )
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained('google/byt5-small')
    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
SCREAMING_SNAKE_CASE_ = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = '''Unicode €.'''
SCREAMING_SNAKE_CASE_ = tokenizer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , lowerCamelCase__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , 'Unicode €.</s>' )
SCREAMING_SNAKE_CASE_ = tokenizer('e è é ê ë' )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , lowerCamelCase__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , lowerCamelCase__ )
self.assertIn('attention_mask' , lowerCamelCase__ )
self.assertNotIn('decoder_input_ids' , lowerCamelCase__ )
self.assertNotIn('decoder_attention_mask' , lowerCamelCase__ )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
SCREAMING_SNAKE_CASE_ = tokenizer(
text_target=lowerCamelCase__ , max_length=32 , padding='max_length' , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization. </s>''']
SCREAMING_SNAKE_CASE_ = ['''Summary of the text. </s>''']
# fmt: off
SCREAMING_SNAKE_CASE_ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE_ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer(lowerCamelCase__ , text_target=lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , batch['input_ids'][0] )
self.assertEqual(lowerCamelCase__ , batch['labels'][0] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
shutil.rmtree(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['bim', 'bambam'] )
SCREAMING_SNAKE_CASE_ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE_ = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase__ )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
SCREAMING_SNAKE_CASE_ = json.load(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = [F"<extra_id_{i}>" for i in range(125 )]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCamelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=lowerCamelCase__ )]
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(
lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(lowerCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=lowerCamelCase__ , do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE_ = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
for attr in attributes_list:
setattr(lowerCamelCase__ , attr + '_id' , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + '_id' ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , attr + '_id' , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(getattr(lowerCamelCase__ , attr + '_id' ) , lowerCamelCase__ )
setattr(lowerCamelCase__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCamelCase__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCamelCase__ , 'additional_special_tokens_ids' ) , [] )
setattr(lowerCamelCase__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(lowerCamelCase__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 357 |
def multiply(a: int, b: int) -> int:
    # Russian-peasant style multiplication: accumulate `a` once per set bit of `b`.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
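# Quick illustrative check of the shift-and-add loops in this file (the
# descriptive names `multiply` / `multiply_mod` are editorial choices, not
# from the original source):
# >>> multiply(3, 5)
# 15
# >>> multiply_mod(3, 5, 4)
# 3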
def multiply_mod(a: int, b: int, c: int) -> int:
    # Same bitwise accumulation, but every partial sum is reduced modulo `c`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 210 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
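# Example usage, assuming the standard `accelerate` CLI wires this parser in
# as the `test` subcommand (shown for illustration only):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml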
| 226 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
            job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
            artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers['''Location''']
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f'{artifact_name}.zip' )
    with open(file_path , '''wb''' ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    '''simple docstring'''
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode('''UTF-8''' ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(''': ''' )]
                                    error = line[line.index(''': ''' ) + len(''': ''' ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
                                # `test` is the test method that failed
                                test = line[len('''FAILED ''' ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '
            f'and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ''' problem.''' )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors(artifact_dir, job_links=None):
    '''simple docstring'''
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith('''.zip''' )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error(logs, error_filter=None):
    '''simple docstring'''
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
def get_model(test):
    '''simple docstring'''
    test = test.split('''::''' )[0]
    if test.startswith('''tests/models/''' ):
        model = test.split('''/''' )[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    '''simple docstring'''
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {'''count''': n_errors, '''errors''': error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True ) )
    return r
def make_github_table(reduced_by_error):
    '''simple docstring'''
    header = '''| no. | error | status |'''
    sep = '''|-:|:-|:-|'''
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['''count''']
        line = f'| {count} | {error[:1_00]} | |'
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model(reduced_by_model):
    '''simple docstring'''
    header = '''| model | no. of errors | major error | count |'''
    sep = '''|-:|-:|-:|-:|'''
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['''count''']
        error, _count = list(reduced_by_model[model]['''errors'''].items() )[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
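    # Example invocation (hypothetical run id and token):
    #   python get_ci_error_statistics.py \
    #       --workflow_run_id 1234567890 \
    #       --output_dir ci_reports \
    #       --token "$GITHUB_TOKEN"
    # This downloads every artifact of the run, aggregates the failing tests,
    # and writes the two GitHub-flavored tables produced above.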
| 226 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.attention_layers)` == `config.num_layers` '''
                f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
                f"""`config.num_layers = {self.num_layers}`. """
                '''`config.attention_layers` is prepared using `config.attention_types`. '''
                '''Please verify the value of `config.attention_types` argument.''' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params(attention_types):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='''floor''') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return the largest divisor of `seq_length` below `window_size`, and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='''floor''')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.num_heads

    def generate_dummy_inputs(self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch

                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
| 363 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''')
        self.assertEqual(vocab_keys[1] , '''<pad>''')
        self.assertEqual(vocab_keys[-1] , '''<mask>''')
        self.assertEqual(len(vocab_keys) , 1004)

    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        '''simple docstring'''
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = '''I was born in 92000, and this is falsé.'''
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=sequences , )
| 287 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 91 |
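# --- Added illustration (not part of the original file): the `_LazyModule` wiring above
# keeps `import transformers` cheap by deferring submodule imports until one of their
# names is first accessed. A minimal sketch of the same idea with a module-level
# `__getattr__` (PEP 562); the submodule/attribute names below are hypothetical stand-ins.
import importlib

_lazy_structure = {"statistics": ["mean", "median"]}  # exported name -> owning module

def __getattr__(name):
    for module_name, exported_names in _lazy_structure.items():
        if name in exported_names:
            module = importlib.import_module(module_name)  # imported only on first use
            value = getattr(module, name)
            globals()[name] = value  # cache so later lookups skip __getattr__
            return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")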
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args : Dict , **kwargs : Union[str, Any]):
        '''simple docstring'''
        warnings.warn(
            '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DeiTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 91 | 1 |
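# --- Added migration note (not in the original file): the shim above only emits a
# FutureWarning and forwards everything to `DeiTImageProcessor`, so new code should
# construct the processor directly and skip the deprecated name.
from transformers import DeiTImageProcessor

image_processor = DeiTImageProcessor()  # same behaviour, no deprecation warning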
def triangle_number_generator( ):
    """simple docstring"""
    for n in range(1 , 1_0_0_0_0_0_0 ):
        yield n * (n + 1) // 2
def count_divisors( n ):
    """simple docstring"""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution( ):
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 369 |
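# --- Added worked example (not in the original solution): for n = p1**a1 * p2**a2 * ...,
# the divisor count is (a1 + 1) * (a2 + 1) * ..., which is what `count_divisors` computes.
# 28 = 2**2 * 7 has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.
assert count_divisors(28) == 6
assert count_divisors(76_576_500) == 576  # the first triangle number with > 500 divisors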
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rag'] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rag'] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124 | 0 |
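# --- Added usage sketch (assumption-flagged, not part of the import shim): the pieces
# exported above are typically wired together like this; `use_dummy_dataset=True` avoids
# downloading the full wiki_dpr index for a quick smoke test.
from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)
model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))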
"""simple docstring"""
from __future__ import annotations
def get_valid_pos( position , n ) ->list[tuple[int, int]]:
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board ) ->bool:
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board , pos , curr ) ->bool:
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n ) ->list[list[int]]:
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290 | """simple docstring"""
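# --- Added usage sketch (not in the original file): an open knight's tour exists on a
# 5x5 board, so the backtracking search above returns a grid whose cells record the
# move number (1..25) at which the knight visited them.
board = open_knight_tour(5)
for row in board:
    print(row)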
from __future__ import annotations
from math import pow, sqrt
def validate( *values ) ->bool:
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_1 , molar_mass_2 ) ->float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
    )
def first_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ) ->float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_effusion_rate( effusion_rate , molar_mass_1 , molar_mass_2 ) ->float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def first_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ) ->float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_molar_mass( molar_mass , effusion_rate_1 , effusion_rate_2 ) ->float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
| 290 | 1 |
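# --- Added worked example (not in the original file): Graham's law states
# rate_1 / rate_2 = sqrt(M_2 / M_1). Comparing hydrogen (~2.016 g/mol) with
# oxygen (~32.00 g/mol) shows hydrogen effusing roughly four times faster.
print(effusion_ratio(2.016, 32.00))  # 3.984095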
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback( TrainerCallback ):
    def __init__( self ):
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append('''on_init_end''' )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append('''on_train_begin''' )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append('''on_train_end''' )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append('''on_epoch_begin''' )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append('''on_epoch_end''' )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append('''on_step_begin''' )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append('''on_step_end''' )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append('''on_evaluate''' )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append('''on_predict''' )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append('''on_save''' )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append('''on_log''' )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append('''on_prediction_step''' )
@require_torch
class TrainerCallbackTest( unittest.TestCase ):
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ['''on_init_end''', '''on_train_begin''']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        expected_evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
        evaluation_events = expected_evaluation_events
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''' , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 98 |
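# --- Added usage sketch (not part of the test file): user code subclasses
# `TrainerCallback` and overrides only the hooks it needs; `Trainer` then fires each
# hook at the matching point in the training loop, exactly as the events recorded above.
from transformers import TrainerCallback

class EpochPrinterCallback(TrainerCallback):
    """Print a line whenever an epoch finishes."""

    def on_epoch_end(self, args, state, control, **kwargs):
        print(f"finished epoch {state.epoch}")

# trainer = Trainer(model, training_args, callbacks=[EpochPrinterCallback()])  # hypothetical wiring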
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs( self ):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
    def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLModel(config )
        hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
        hidden_states_2 , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
        inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
        _ , mems_1 = model(inputs ).to_tuple()
        lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
        inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1, '''labels''': lm_labels}
        _ , mems_2 = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_1 )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids_1 , input_ids_2 , lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFTransfoXLModel,
            '''text-classification''': TFTransfoXLForSequenceClassification,
            '''text-generation''': TFTransfoXLLMHeadModel,
            '''zero-shot''': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_transfo_xl_model( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def snake_case__ ( self : Any ):
pass
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def snake_case__ ( self : int ):
__magic_name__ = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
__magic_name__ = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__magic_name__ = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__magic_name__ = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 98 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
"""simple docstring"""
    def __init__( self ,parent ,batch_size=1_3 ,image_size=1_0 ,num_channels=3 ,patch_size=2 ,tubelet_size=2 ,num_frames=2 ,is_training=True ,use_labels=True ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,type_sequence_label_size=1_0 ,initializer_range=0.0_2 ,mask_ratio=0.9 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self ,config ,pixel_values ,labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size ,-1 ).bool()
        result = model(pixel_values ,bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=VideoMAEConfig ,has_text_modality=False ,hidden_size=3_7 )
    def _prepare_for_class( self ,inputs_dict ,model_class ,return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size ,-1 ).bool()
            inputs_dict['''bool_masked_pos'''] = bool_masked_pos.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['''labels'''] = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=torch_device )
        return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
if not self.has_attentions:
pass
else:
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
for model_class in self.all_model_classes:
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
__lowercase = len(lowercase__ )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
self.assertEqual(out_len + 1 ,len(lowercase__ ) )
__lowercase = outputs.attentions
self.assertEqual(len(lowercase__ ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
def check_hidden_states_output(lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs.hidden_states
__lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase__ ) ,lowercase__ )
__lowercase = self.model_tester.seq_length - self.model_tester.num_masks
__lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(lowercase__ ,lowercase__ ,lowercase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def prepare_video( ):
    """simple docstring"""
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 4_0_0) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
@slow
    def test_inference_for_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video ,return_tensors='''pt''' ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''' )
        inputs['''bool_masked_pos'''] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor(
            [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] ,device=torch_device )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice ,atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_1_4_2] ,device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss ,expected_loss ,atol=1e-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.6_4_6_9] ,device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss ,expected_loss ,atol=1e-4 ) )
| 104 |
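# --- Added sketch (not part of the test file): building `bool_masked_pos` for VideoMAE
# pre-training by hand, mirroring the arithmetic in VideoMAEModelTester above. Every
# video in a batch must mask the same number of patches.
import torch

image_size, patch_size = 224, 16
num_frames, tubelet_size = 16, 2
mask_ratio = 0.9

num_patches_per_frame = (image_size // patch_size) ** 2           # 196 patches per frame
seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 1568 tokens
num_masks = int(mask_ratio * seq_length)

mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask[torch.randperm(seq_length)].unsqueeze(0).bool()  # shape (1, 1568)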
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
    def test_stable_diffusion_xl_img2img_euler(self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass(self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_save_load_optional_components(self ):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ):
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion(self ):
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 174 | 0 |
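# --- Added usage sketch (assumption-flagged, not part of the test file): outside the
# test harness the img2img pipeline is normally loaded from a released checkpoint; the
# input path below is a placeholder for any RGB image on disk.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("input.png")  # placeholder path
image = pipe(
    "a photo of an astronaut riding a horse",
    image=init_image,
    strength=0.75,  # how much of the init image to re-noise, as in get_dummy_inputs above
).images[0]
image.save("output.png")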
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=50_267 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                """The config can simply be saved and uploaded again to be fixed.""" )
| 331 |
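# --- Added illustration (not in the original file): `attribute_map` lets framework code
# read the generic `hidden_size`/`num_attention_heads` names even though MVP stores them
# as `d_model`/`encoder_attention_heads`.
from transformers import MvpConfig

config = MvpConfig()
assert config.hidden_size == config.d_model == 1_024
assert config.num_attention_heads == config.encoder_attention_heads == 16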
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text ):
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F'{round(-1 * my_fir_sum ):.1f}' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
# print second entropy
print(F'{round(-1 * my_sec_sum ):.1f}' )
# print the difference between them
print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' )
def analyze_text( text ):
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main( ):
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 331 | 1 |
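# --- Added worked example (not in the original file): the quantity printed above is the
# first-order Shannon entropy H = -sum(p_i * log2(p_i)). A fair coin comes out to
# exactly 1 bit per symbol.
import math

probs = [0.5, 0.5]
entropy = -sum(p * math.log2(p) for p in probs)
print(f"{entropy:.1f}")  # 1.0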
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=42 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    '''simple docstring'''
    if not len(imgs ) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.' )
    w , h = imgs[0].size
    grid = Image.new('RGB' , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
return grid
def lowercase__ ( __snake_case : List[str] , __snake_case : Optional[int]="robotic cat with wings" , __snake_case : List[Any]=7.5 , __snake_case : List[Any]=50 , __snake_case : Optional[Any]=1 , __snake_case : List[str]=42 , ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.Generator(pipeline.device ).manual_seed(_lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = pipeline(
_lowerCamelCase , guidance_scale=_lowerCamelCase , num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , ).images
UpperCAmelCase_ : int = int(math.sqrt(_lowerCamelCase ) )
UpperCAmelCase_ : List[str] = image_grid(_lowerCamelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
__UpperCAmelCase = parse_args()
# Load models and create wrapper for stable diffusion
__UpperCAmelCase = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
__UpperCAmelCase = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
__UpperCAmelCase = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
__UpperCAmelCase = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
__UpperCAmelCase = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
__UpperCAmelCase = lambda images, clip_input: (images, False)
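# Note: the lambda above appears to replace the pipeline's safety checker with a
# no-op that returns every image unchanged and reports "not NSFW" (the `False`
# flag), disabling content filtering; reasonable for a local benchmark, not for
# anything user-facing.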
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
__UpperCAmelCase = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
__UpperCAmelCase = unet.to(torch.device('cuda', args.cuda_id))
__UpperCAmelCase = pipeline.to(unet.device)
__UpperCAmelCase , __UpperCAmelCase = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
__UpperCAmelCase = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 29 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
_lowerCAmelCase : Any = conn, Mock()
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
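# How the mocks above terminate the transfer: `iter([1, None])` makes the
# patched file's `read` return a truthy chunk once and then a falsy value, so
# `send_file`'s read-and-send loop runs exactly one iteration before exiting,
# which is why `conn.send` is asserted to have been called exactly once.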
| 309 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCAmelCase( __lowerCamelCase ):
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
__a = precision
__a = ceil(precision / 14 )
__a = 42_6880 * Decimal(1_0005 ).sqrt()
__a = 1
__a = 1359_1409
__a = Decimal(__lowerCamelCase )
for k in range(1 , __lowerCamelCase ):
__a = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCamelCase ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
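# The loop above sums the Chudnovsky series; as a sketch of the math, it
# evaluates
#     pi = 426880 * sqrt(10005)
#          / sum_{k>=0} (6k)! * (13591409 + 545140134*k)
#                       / ((3k)! * (k!)**3 * (-262537412640768000)**k)
# Each term contributes roughly 14 correct digits, which is why the number of
# iterations is ceil(precision / 14).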
if __name__ == "__main__":
lowerCamelCase_ : int = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 197 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[int] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 197 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_lowercase : Dict = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
_lowercase , _lowercase : int = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowercase : Optional[int] = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
assert base_extractor.is_extractable(lowerCamelCase_ )
_lowercase : str = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : Union[str, Any] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : List[Any] = output_path.read_text(encoding='utf-8' )
_lowercase : Tuple = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
_lowercase : List[Any] = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
_lowercase : int = input_paths[compression_format]
if input_path is None:
_lowercase : Tuple = F'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
_lowercase : List[Any] = Extractor.infer_extractor_format(lowerCamelCase_ )
assert extractor_format is not None
_lowercase : int = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowercase : Optional[int] = file_path.read_text(encoding='utf-8' )
else:
_lowercase : Any = output_path.read_text(encoding='utf-8' )
_lowercase : Any = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
import tarfile
_lowercase : Union[str, Any] = tmp_path / 'data_dot_dot'
directory.mkdir()
_lowercase : Tuple = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[Any]:
import tarfile
_lowercase : int = tmp_path / 'data_sym_link'
directory.mkdir()
_lowercase : Union[str, Any] = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=lowerCamelCase_ )
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Dict = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
_lowercase : List[Any] = insecure_tar_files[insecure_tar_file]
_lowercase : List[str] = tmp_path / 'extracted'
TarExtractor.extract(lowerCamelCase_ , lowerCamelCase_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
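# The two fixtures above build deliberately unsafe archives: one whose member
# path escapes the target directory via "..", and one containing a symlink that
# points outside the extraction root. The parametrized test asserts that
# `TarExtractor.extract` refuses both, logging an ERROR rather than writing
# files outside `tmp_path`.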
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
    # We should have fewer false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_lowercase : List[Any] = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
_lowercase : Union[str, Any] = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(lowerCamelCase_ )
assert zipfile.is_zipfile(str(lowerCamelCase_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase_ ) # but we're right
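# A rough sketch of the distinction being tested (helper name is hypothetical):
# checking only the leading magic bytes avoids the false positive that
# `zipfile.is_zipfile` hits when a "PK\x05\x06" end-of-central-directory marker
# happens to occur inside unrelated data, as in the crafted PNG bytes above.
#
#   def looks_like_zip(path):
#       with open(path, "rb") as f:
#           return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")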
| 21 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
a__ = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
a__ = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
raise Exception("""requires fairseq >= 0.9.0""")
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = """ Hello world! cécé herlolip"""
a__ = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
_snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
_snake_case : Optional[int] = dct.pop(SCREAMING_SNAKE_CASE__ )
_snake_case : int = val
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]:
_snake_case : List[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
_snake_case : int = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
_snake_case , _snake_case : List[str] = emb.weight.shape
_snake_case : Any = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = emb.weight.data
return lin_layer
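# `make_linear_from_emb` above mirrors the usual weight-tying trick: it builds a
# bias-free linear layer whose weight matrix is shared with the token-embedding
# matrix (vocab_size x d_model), so output logits are dot products between
# hidden states and the embedding vectors.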
@torch.no_grad()
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None ) -> List[str]:
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
_snake_case : List[str] = torch.hub.load("""pytorch/fairseq""" , SCREAMING_SNAKE_CASE__ ).eval()
else:
_snake_case : Union[str, Any] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_snake_case : Optional[Any] = checkpoint_path.replace(""".""" , """-""" )
_snake_case : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
_snake_case : str = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
_snake_case : Dict = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : str = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = bart.predict("""mnli""" , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
_snake_case : Dict = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = state_dict["""decoder.embed_tokens.weight"""]
_snake_case : Optional[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
_snake_case : Optional[Any] = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
_snake_case : str = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , """lm_head""" ):
_snake_case : Any = make_linear_from_emb(model.model.shared )
_snake_case : Optional[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
)
a__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 317 | 0 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCamelCase__( UpperCamelCase__ : dict , UpperCamelCase__ : str , UpperCamelCase__ : set , UpperCamelCase__ : set , UpperCamelCase__ : dict , UpperCamelCase__ : dict , UpperCamelCase__ : PriorityQueue , UpperCamelCase__ : dict , UpperCamelCase__ : float | int , )->float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A__ = cst_fwd.get(UpperCamelCase__ , np.inf )
A__ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A__ = new_cost_f
A__ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A__ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : dict , UpperCamelCase__ : dict )->int:
A__ = -1
A__ = set()
A__ = set()
A__ = {source: 0}
A__ = {destination: 0}
A__ = {source: None}
A__ = {destination: None}
A__ = PriorityQueue()
A__ = PriorityQueue()
A__ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A__ , A__ = queue_forward.get()
visited_forward.add(UpperCamelCase__ )
A__ , A__ = queue_backward.get()
visited_backward.add(UpperCamelCase__ )
A__ = pass_and_relaxation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
A__ = pass_and_relaxation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A__ = shortest_distance
return shortest_path_distance
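# Termination note for the loop above: in bidirectional Dijkstra, once the
# smallest tentative distances on the two frontiers satisfy
# dist_fwd(v_fwd) + dist_bwd(v_bwd) >= shortest_distance, no yet-unexplored
# meeting vertex can yield a shorter route, so the search can stop early even
# though neither side has settled the whole graph.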
a__: Dict = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
a__: Dict = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
from __future__ import annotations
import time
import numpy as np
a__: Optional[Any] = [8, 5, 9, 7]
a__: Dict = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
a__: List[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
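# A reading guide for the data above: `claim_vector` presumably holds the total
# units of each of the 4 resource types, and the two 5x4 tables are, in the
# constructor's argument order, the per-process current allocations and maximum
# claims; the class below derives need = maximum_claim - allocated from them.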
class SCREAMING_SNAKE_CASE__ :
def __init__( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
A__ = claim_vector
A__ = allocated_resources_table
A__ = maximum_claim_table
def UpperCamelCase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def UpperCamelCase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def UpperCamelCase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def UpperCamelCase ( self ):
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = self.__need()
A__ = self.__allocated_resources_table
A__ = self.__available_resources()
A__ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
A__ = False
for each_need in need_list:
A__ = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
A__ = False
break
if execution:
A__ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
A__ = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
A__ = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def UpperCamelCase ( self ):
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"
+ ''' '''.join(f"{it:>8}" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"
+ ''' '''.join(f"{it:>8}" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( snake_case__, unittest.TestCase ):
_UpperCAmelCase :List[str] = DDIMPipeline
_UpperCAmelCase :Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase :Any = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_UpperCAmelCase :Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase :Tuple = False
def UpperCAmelCase__ ( self : str ):
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
lowerCamelCase_ : Optional[Any] =DDIMScheduler()
lowerCamelCase_ : Optional[int] ={"unet": unet, "scheduler": scheduler}
return components
def UpperCAmelCase__ ( self : Any , snake_case__ : str , snake_case__ : Dict=0 ):
if str(snake_case__ ).startswith("mps" ):
lowerCamelCase_ : Any =torch.manual_seed(snake_case__ )
else:
lowerCamelCase_ : Any =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCamelCase_ : Optional[int] ={
"batch_size": 1,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : str ="cpu"
lowerCamelCase_ : Optional[Any] =self.get_dummy_components()
lowerCamelCase_ : Tuple =self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Dict =self.get_dummy_inputs(snake_case__ )
lowerCamelCase_ : Tuple =pipe(**snake_case__ ).images
lowerCamelCase_ : Any =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
lowerCamelCase_ : Optional[Any] =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
lowerCamelCase_ : Optional[Any] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def UpperCAmelCase__ ( self : Tuple ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : Tuple ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCAmelCase__ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : Union[str, Any] ="google/ddpm-cifar10-32"
lowerCamelCase_ : Dict =UNetaDModel.from_pretrained(snake_case__ )
lowerCamelCase_ : Optional[Any] =DDIMScheduler()
lowerCamelCase_ : Optional[Any] =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ )
ddim.to(snake_case__ )
ddim.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int =torch.manual_seed(0 )
lowerCamelCase_ : Tuple =ddim(generator=snake_case__ , eta=0.0 , output_type="numpy" ).images
lowerCamelCase_ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ : Optional[Any] =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : str ="google/ddpm-ema-bedroom-256"
lowerCamelCase_ : List[str] =UNetaDModel.from_pretrained(snake_case__ )
lowerCamelCase_ : int =DDIMScheduler.from_pretrained(snake_case__ )
lowerCamelCase_ : str =DDIMPipeline(unet=snake_case__ , scheduler=snake_case__ )
ddpm.to(snake_case__ )
ddpm.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Any =torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] =ddpm(generator=snake_case__ , output_type="numpy" ).images
lowerCamelCase_ : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCamelCase_ : Optional[int] =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
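# Aside on `eta=0.0` in the CIFAR-10 test above: DDIM's eta interpolates between
# fully deterministic sampling (eta = 0, no injected noise) and DDPM-like
# stochastic sampling (eta = 1), so eta=0.0 plus a fixed generator seed makes
# the expected image slice reproducible.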
| 144 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A__ : Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : List[str]=16 , snake_case__ : Tuple=13 , snake_case__ : Dict=7 , snake_case__ : List[Any]=14 , snake_case__ : List[Any]=10 , snake_case__ : Dict=19 , snake_case__ : List[str]=5 , snake_case__ : Union[str, Any]=4 , snake_case__ : str=True , snake_case__ : int=16 , snake_case__ : Union[str, Any]=2 , snake_case__ : Tuple=4 , snake_case__ : Dict=4 , snake_case__ : int="gelu" , snake_case__ : Dict=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=[1, 2, 3, 4, 5] , snake_case__ : Optional[int]=25 , snake_case__ : Dict=5 , ):
lowerCamelCase_ : Dict =d_model
lowerCamelCase_ : int =parent
lowerCamelCase_ : Optional[Any] =batch_size
lowerCamelCase_ : int =prediction_length
lowerCamelCase_ : Optional[int] =context_length
lowerCamelCase_ : Any =cardinality
lowerCamelCase_ : List[str] =num_time_features
lowerCamelCase_ : List[Any] =lags_sequence
lowerCamelCase_ : Optional[int] =embedding_dimension
lowerCamelCase_ : Union[str, Any] =is_training
lowerCamelCase_ : Union[str, Any] =hidden_size
lowerCamelCase_ : str =num_hidden_layers
lowerCamelCase_ : Any =num_attention_heads
lowerCamelCase_ : Any =intermediate_size
lowerCamelCase_ : Union[str, Any] =hidden_act
lowerCamelCase_ : Optional[int] =hidden_dropout_prob
lowerCamelCase_ : Optional[int] =attention_probs_dropout_prob
lowerCamelCase_ : List[Any] =context_length
lowerCamelCase_ : str =prediction_length + label_length
lowerCamelCase_ : int =label_length
lowerCamelCase_ : Union[str, Any] =moving_average
lowerCamelCase_ : str =autocorrelation_factor
def UpperCAmelCase__ ( self : Any ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : List[Any] ):
lowerCamelCase_ : Optional[Any] =config.context_length + max(config.lags_sequence )
lowerCamelCase_ : Any =ids_tensor([self.batch_size, 1] , config.cardinality[0] )
lowerCamelCase_ : List[Any] =floats_tensor([self.batch_size, _past_length, config.num_time_features] )
lowerCamelCase_ : List[str] =floats_tensor([self.batch_size, _past_length] )
lowerCamelCase_ : Any =floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
lowerCamelCase_ : Tuple =floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
lowerCamelCase_ : Optional[Any] =floats_tensor([self.batch_size, config.prediction_length] )
lowerCamelCase_ : Any ={
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =self.get_config()
lowerCamelCase_ : List[Any] =self.prepare_autoformer_inputs_dict(snake_case__ )
return config, inputs_dict
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : List[str] =self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
lowerCamelCase_ : str =AutoformerModel(config=snake_case__ ).to(snake_case__ ).eval()
lowerCamelCase_ : int =model(**snake_case__ )
lowerCamelCase_ : str =outputs.encoder_last_hidden_state
lowerCamelCase_ : Optional[Any] =outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : Tuple =model.get_encoder()
encoder.save_pretrained(snake_case__ )
lowerCamelCase_ : Any =AutoformerEncoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =model.create_network_inputs(**snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Optional[int] =model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowerCamelCase_ : Dict =torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowerCamelCase_ : int =encoder(inputs_embeds=snake_case__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
lowerCamelCase_ : str =(
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowerCamelCase_ : Optional[int] =torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowerCamelCase_ : Any =torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowerCamelCase_ : Optional[Any] =torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : List[str] =model.get_decoder()
decoder.save_pretrained(snake_case__ )
lowerCamelCase_ : str =AutoformerDecoder.from_pretrained(snake_case__ ).to(snake_case__ )
lowerCamelCase_ : List[str] =decoder(
trend=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
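# Decomposition note for the helper above: Autoformer first splits the context
# window into seasonal and trend components (`decomposition_layer`), feeds the
# seasonal series plus features to the encoder, and seeds the decoder with the
# label-length tail of both components extended to the prediction length; that
# is why the decoder is called with an explicit `trend=` input.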
@require_torch
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[int] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_UpperCAmelCase :Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
_UpperCAmelCase :Optional[int] = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :int = False
_UpperCAmelCase :int = False
_UpperCAmelCase :Optional[int] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Dict = False
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : List[str] =AutoformerModelTester(self )
lowerCamelCase_ : List[str] =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : str =model_class.from_pretrained(snake_case__ , output_loading_info=snake_case__ )
self.assertEqual(info["missing_keys"] , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Any =inspect.signature(getattr(snake_case__ , "forward" ) )
# The main input is the name of the argument after `self`
lowerCamelCase_ : Optional[Any] =list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ , lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
lowerCamelCase_ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[Any] =[
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : List[str] =getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCamelCase_ : Dict =getattr(self.model_tester , "decoder_seq_length" , snake_case__ )
lowerCamelCase_ : List[Any] =getattr(self.model_tester , "encoder_seq_length" , snake_case__ )
lowerCamelCase_ : Optional[Any] =getattr(self.model_tester , "d_model" , snake_case__ )
lowerCamelCase_ : List[str] =getattr(self.model_tester , "num_attention_heads" , snake_case__ )
lowerCamelCase_ : Union[str, Any] =d_model // num_attention_heads
for model_class in self.all_model_classes:
lowerCamelCase_ : str =True
lowerCamelCase_ : int =False
lowerCamelCase_ : Any =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : str =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : List[Any] =True
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : List[str] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowerCamelCase_ : Optional[Any] =len(snake_case__ )
lowerCamelCase_ : List[Any] =7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case__ , snake_case__ )
# decoder attentions
lowerCamelCase_ : Union[str, Any] =outputs.decoder_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowerCamelCase_ : Tuple =outputs.cross_attentions
self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowerCamelCase_ : Tuple =True
lowerCamelCase_ : Optional[int] =True
lowerCamelCase_ : Tuple =model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Dict =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 2 , len(snake_case__ ) )
lowerCamelCase_ : Union[str, Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase__ ( self : Optional[int] ):
super().test_retain_grad_hidden_states_attentions()
def _snake_case ( lowerCamelCase__ : Tuple="train-batch.pt" ) -> Any:
lowerCamelCase_ : Tuple =hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=lowerCamelCase__ , repo_type="dataset" )
lowerCamelCase_ : List[Any] =torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
return batch
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : int =AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : List[str] =prepare_batch()
with torch.no_grad():
lowerCamelCase_ : List[Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
lowerCamelCase_ : Union[str, Any] =torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Dict =torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : str =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Optional[int] =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
lowerCamelCase_ : List[Any] =torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case__ )
lowerCamelCase_ : Optional[Any] =torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case__ , atol=snake_case__ ) )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : int =AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case__ )
lowerCamelCase_ : Dict =prepare_batch("val-batch.pt" )
with torch.no_grad():
lowerCamelCase_ : Union[str, Any] =model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
lowerCamelCase_ : Tuple =torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case__ )
lowerCamelCase_ : List[str] =torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case__ )
lowerCamelCase_ : Any =outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case__ , rtol=1E-1 ) )
| 144 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class A__ ( _snake_case ):
lowercase = "altclip_text_model"
def __init__( self , UpperCamelCase__=250002 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=514 , UpperCamelCase__=1 , UpperCamelCase__=0.02 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-0_5 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=768 , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = initializer_factor
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = project_dim
class A__ ( _snake_case ):
lowercase = "altclip_vision_model"
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=32 , UpperCamelCase__="quick_gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1.0 , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = hidden_size
A_ = intermediate_size
A_ = projection_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_channels
A_ = patch_size
A_ = image_size
A_ = initializer_range
A_ = initializer_factor
A_ = attention_dropout
A_ = layer_norm_eps
A_ = hidden_act
@classmethod
def snake_case_ ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__ )
A_ , A_ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
A_ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class A__ ( _snake_case ):
lowercase = "altclip"
lowercase = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=2.6592 , **UpperCamelCase__ ) -> int:
'''simple docstring'''
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A_ = kwargs.pop("""text_config_dict""" , UpperCamelCase__ )
A_ = kwargs.pop("""vision_config_dict""" , UpperCamelCase__ )
super().__init__(**UpperCamelCase__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A_ = {}
# This is the complete result when using `text_config_dict`.
A_ = AltCLIPTextConfig(**UpperCamelCase__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A_ = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
A_ = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(UpperCamelCase__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A_ = {}
# This is the complete result when using `vision_config_dict`.
A_ = AltCLIPVisionConfig(**UpperCamelCase__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A_ = {
str(UpperCamelCase__ ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A_ = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
A_ = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(UpperCamelCase__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A_ = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
A_ = {}
            logger.info("""`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.""" )
A_ = AltCLIPTextConfig(**UpperCamelCase__ )
A_ = AltCLIPVisionConfig(**UpperCamelCase__ )
A_ = projection_dim
A_ = logit_scale_init_value
A_ = 1.0
@classmethod
def snake_case_ ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = copy.deepcopy(self.__dict__ )
A_ = self.text_config.to_dict()
A_ = self.vision_config.to_dict()
A_ = self.__class__.model_type
return output
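# Merge-precedence note for the constructor above: when both `text_config` and
# `text_config_dict` are passed, values from `text_config_dict` win, and a
# warning is logged for every key on which the two disagree (the vision side
# follows the same rule), so the `*_config_dict` kwargs act as authoritative
# overrides kept for backward compatibility.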
| 101 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00 ) -> int:
A_ = n * (n + 1) * (2 * n + 1) / 6
A_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
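# The closed forms above are the standard identities (stated as a sketch, not
# derived here): sum_{i=1..n} i^2 = n(n+1)(2n+1)/6 and
# (sum_{i=1..n} i)^2 = (n(n+1)/2)^2. For n = 10 the difference is
# 3025 - 385 = 2640, a quick spot check for the function.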
| 101 | 1 |
import math
class _snake_case :
'''simple docstring'''
def __init__( self: Tuple ,lowerCamelCase_: Tuple=0 ) -> Dict: # a graph with Node 0,1,...,N-1
UpperCAmelCase_ : Union[str, Any] = n
UpperCAmelCase_ : int = [
[math.inf for j in range(0 ,lowerCamelCase_ )] for i in range(0 ,lowerCamelCase_ )
] # adjacency matrix for weight
UpperCAmelCase_ : List[Any] = [
[math.inf for j in range(0 ,lowerCamelCase_ )] for i in range(0 ,lowerCamelCase_ )
] # dp[i][j] stores minimum distance from i to j
def A__ ( self: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Dict = w
def A__ ( self: str ) -> Tuple:
for k in range(0 ,self.n ):
for i in range(0 ,self.n ):
for j in range(0 ,self.n ):
UpperCAmelCase_ : List[str] = min(self.dp[i][j] ,self.dp[i][k] + self.dp[k][j] )
def A__ ( self: List[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Union[str, Any]:
return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
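# A brief note on the demo above: `floyd_warshall` relaxes every (i, j) pair
# through every intermediate node k, so it runs in O(n^3) time with O(n^2)
# space; afterwards `show_min(u, v)` reads off the all-pairs answer, e.g. the
# shortest 1 -> 4 distance here is 5 + 6 = 11 via node 3.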
| 345 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Optional[int] ) -> Any:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : str = -1
UpperCAmelCase_ : Dict = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Any = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : List[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : Dict = tokenizer.decode(greedy_ids[0] )
UpperCAmelCase_ : str = TextIteratorStreamer(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : str = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
UpperCAmelCase_ : int = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = -1
UpperCAmelCase_ : Tuple = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ )
UpperCAmelCase_ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCAmelCase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCAmelCase_ : List[Any] = TextStreamer(lowerCamelCase_ ,skip_prompt=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=10 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCAmelCase_ : List[str] = cs.out[:-1]
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: str ) -> str:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : Any = -1
UpperCAmelCase_ : Union[str, Any] = torch.ones((1, 5) ,device=lowerCamelCase_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCAmelCase_ : Union[str, Any] = TextStreamer(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
model.generate(lowerCamelCase_ ,max_new_tokens=1 ,do_sample=lowerCamelCase_ ,streamer=lowerCamelCase_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCAmelCase_ : List[str] = cs.out[:-1] # Remove the final "\n"
UpperCAmelCase_ : Dict = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
def A__ ( self: List[str] ) -> Any:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCAmelCase_ : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = TextIteratorStreamer(lowerCamelCase_ ,timeout=0.0_0_1 )
UpperCAmelCase_ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCAmelCase_ : Dict = Thread(target=model.generate ,kwargs=lowerCamelCase_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCamelCase_ ):
UpperCAmelCase_ : Union[str, Any] = """"""
for new_text in streamer:
streamer_text += new_text
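# The tests above exercise both streaming entry points. A condensed, hedged
# usage sketch of the same pattern (the model name and the download step are
# assumptions; running this would fetch weights from the Hub):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     lm = AutoModelForCausalLM.from_pretrained("gpt2")
#     streamer = TextStreamer(tok, skip_prompt=True)
#     lm.generate(**tok("Hello there,", return_tensors="pt"), max_new_tokens=20, streamer=streamer)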
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : Tuple ={
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] =['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int =[
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
__snake_case : int =_LazyModule(__name__, globals()['__file__'], _import_structure)
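# The block above is the standard transformers lazy-import layout: names are
# declared in `_import_structure`, type checkers see the real imports under
# TYPE_CHECKING, and at runtime `_LazyModule` defers submodule loading. A
# minimal, hedged sketch of the same idea using only the stdlib (PEP 562
# module-level __getattr__; the names are illustrative, not from this file):
#
#     import importlib
#
#     _LAZY = {"heavy_helper": ".heavy_module"}  # attribute -> submodule
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")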
| 94 |
__snake_case : Any ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Tuple ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 94 | 1 |
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Split ``number_of_bytes`` into ``partitions`` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # the last partition absorbs any remainder bytes
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
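# Quick self-check of the range arithmetic above (runs as-is): 100 bytes split
# across 4 partitions yields four 25-byte ranges.
assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']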
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | """simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Return the result of size x size max pooling with the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Return the result of size x size average pooling with the given stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
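# Worked example (runs as-is): 2x2 pooling with stride 2 over a 4x4 grid.
# Max pooling keeps the largest value per block; average pooling truncates the
# block mean to an int before storing it.
_demo = np.arange(16).reshape(4, 4)
assert maxpooling(_demo, size=2, stride=2).tolist() == [[5.0, 7.0], [13.0, 15.0]]
assert avgpooling(_demo, size=2, stride=2).tolist() == [[2.0, 4.0], [10.0, 12.0]]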
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
 | 135 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase :
def __init__( self : List[Any], a_ : Optional[Any], a_ : Optional[int]=13, a_ : List[Any]=7, a_ : List[str]=True, a_ : Any=True, a_ : Optional[int]=True, a_ : str=True, a_ : List[str]=99, a_ : int=32, a_ : Optional[int]=2, a_ : Tuple=4, a_ : Optional[int]=37, a_ : Optional[Any]="gelu", a_ : Optional[int]=0.1, a_ : Optional[int]=0.1, a_ : Tuple=512, a_ : Dict=16, a_ : Union[str, Any]=2, a_ : Optional[int]=0.02, a_ : Optional[Any]=False, a_ : Union[str, Any]=True, a_ : str="None", a_ : str=3, a_ : int=4, a_ : Any=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase__ = DebertaVaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=a_, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Tuple, a_ : str, a_ : Dict, a_ : int, a_ : Dict, a_ : Optional[int], a_ : Optional[Any], a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel(config=a_ )
UpperCamelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(a_ )
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Dict, a_ : List[str], a_ : Optional[int], a_ : Tuple, a_ : int, a_ : int, a_ : Tuple, a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaForMaskedLM(config=a_ )
UpperCamelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : int, a_ : str, a_ : List[str], a_ : Dict, a_ : List[Any], a_ : Optional[int], a_ : Optional[Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFDebertaVaForSequenceClassification(config=a_ )
UpperCamelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict, a_ : List[Any], a_ : Optional[int], a_ : Dict, a_ : List[str], a_ : Optional[Any], a_ : Tuple, a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFDebertaVaForTokenClassification(config=a_ )
UpperCamelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : List[str], a_ : Any, a_ : Any, a_ : Dict, a_ : Optional[int], a_ : Any, a_ : int, a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaForQuestionAnswering(config=a_ )
UpperCamelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCamelCase__ = model(a_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
        (
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
            UpperCamelCase__,
        ) = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Any = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase : int = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : int = False
_lowerCamelCase : List[Any] = False
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=a_, hidden_size=37 )
def lowercase_ ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
@slow
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(a_ )
@require_tf
class UpperCAmelCase ( unittest.TestCase):
@unittest.skip(reason="Model not available yet" )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
@slow
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
UpperCamelCase__ = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase__ = model(a_, attention_mask=a_ )[0]
UpperCamelCase__ = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4], a_, atol=1e-4 )
 | 356 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
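# Minimal pyarrow illustration of the {"bytes", "path"} struct layout that the
# cast helpers above produce (hedged sketch with placeholder values):
#
#     import pyarrow as pa
#
#     bytes_array = pa.array([b"...png bytes...", None], type=pa.binary())
#     path_array = pa.array([None, "img.png"], type=pa.string())
#     struct = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"])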
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
        return objs
 | 31 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 'dpr'
def __init__( self, lowercase_=30522, lowercase_=768, lowercase_=12, lowercase_=12, lowercase_=3072, lowercase_="gelu", lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=2, lowercase_=0.02, lowercase_=1E-12, lowercase_=0, lowercase_="absolute", lowercase_ = 0, **lowercase_, ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=lowercase_, **lowercase_ )
a__ =vocab_size
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_act
a__ =intermediate_size
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =initializer_range
a__ =layer_norm_eps
a__ =projection_dim
a__ =position_embedding_type
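# Hedged usage sketch for the config above (the public class in `transformers`
# is DPRConfig; this simply constructs it and reads back a few fields):
#
#     from transformers import DPRConfig
#
#     cfg = DPRConfig(projection_dim=128)
#     print(cfg.model_type, cfg.hidden_size, cfg.projection_dim)  # dpr 768 128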
| 188 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self, lowercase_, lowercase_=13, lowercase_=7, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=True, lowercase_=False, lowercase_=False, lowercase_=False, lowercase_=2, lowercase_=99, lowercase_=0, lowercase_=32, lowercase_=5, lowercase_=4, lowercase_=0.1, lowercase_=0.1, lowercase_=512, lowercase_=12, lowercase_=2, lowercase_=0.02, lowercase_=3, lowercase_=4, lowercase_="last", lowercase_=None, lowercase_=None, ) -> List[Any]:
"""simple docstring"""
a__ =parent
a__ =batch_size
a__ =seq_length
a__ =is_training
a__ =use_input_lengths
a__ =use_token_type_ids
a__ =use_labels
a__ =gelu_activation
a__ =sinusoidal_embeddings
a__ =causal
a__ =asm
a__ =n_langs
a__ =vocab_size
a__ =n_special
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =max_position_embeddings
a__ =type_vocab_size
a__ =type_sequence_label_size
a__ =initializer_range
a__ =num_labels
a__ =num_choices
a__ =summary_type
a__ =use_proj
a__ =scope
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
a__ =random_attention_mask([self.batch_size, self.seq_length] )
a__ =None
if self.use_input_lengths:
a__ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
a__ =None
if self.use_token_type_ids:
a__ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
a__ =None
a__ =None
a__ =None
if self.use_labels:
a__ =ids_tensor([self.batch_size], self.type_sequence_label_size )
a__ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
a__ =ids_tensor([self.batch_size], 2 ).float()
a__ =ids_tensor([self.batch_size], self.num_choices )
a__ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, lengths=lowercase_, langs=lowercase_ )
a__ =model(lowercase_, langs=lowercase_ )
a__ =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> str:
"""simple docstring"""
a__ =FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, token_type_ids=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, p_mask=lowercase_, )
a__ =model(
lowercase_, start_positions=lowercase_, end_positions=lowercase_, cls_index=lowercase_, is_impossible=lowercase_, )
((a__), ) =result_with_labels.to_tuple()
a__ =model(lowercase_, start_positions=lowercase_, end_positions=lowercase_ )
((a__), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[Any]:
"""simple docstring"""
a__ =FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_ )
a__ =model(lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Optional[int]:
"""simple docstring"""
a__ =self.num_labels
a__ =FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =model(lowercase_, attention_mask=lowercase_, labels=lowercase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, ) -> Dict:
"""simple docstring"""
a__ =self.num_choices
a__ =FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
a__ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
a__ =model(
lowercase_, attention_mask=lowercase_, token_type_ids=lowercase_, labels=lowercase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.prepare_config_and_inputs()
        (
            a__,
            a__,
            a__,
            a__,
            a__,
            a__,
            a__,
            a__,
            a__,
        ) = config_and_inputs
a__ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : str = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_ ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_=False ) -> str:
"""simple docstring"""
a__ =super()._prepare_for_class(lowercase_, lowercase_, return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
a__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowercase_ )
return inputs_dict
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =FlaubertModelTester(self )
a__ =ConfigTester(self, config_class=lowercase_, emb_dim=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ =FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__, a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a__ =True
a__ =model_class(config=lowercase_ )
a__ =self._prepare_for_class(lowercase_, lowercase_ )
a__ =torch.jit.trace(
lowercase_, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_, os.path.join(lowercase_, '''traced_model.pt''' ) )
a__ =torch.jit.load(os.path.join(lowercase_, '''traced_model.pt''' ), map_location=lowercase_ )
loaded(inputs_dict['''input_ids'''].to(lowercase_ ), inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
a__ =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
a__ =model(lowercase_ )[0]
a__ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowercase_ )
a__ =torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowercase_, atol=1E-4 ) )
| 188 | 1 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_snake_case = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
_a : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_a , _a : int = XLMProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
else:
_a : Tuple = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ )
_a , _a : Any = ProphetNetForConditionalGeneration.from_pretrained(
UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
_a : List[Any] = ["""key_proj""", """value_proj""", """query_proj"""]
_a : Tuple = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
_a : Dict = key.split(""".""" )
if attributes[0] == "lm_head":
_a : Any = prophet
_a : Tuple = prophet_old
else:
_a : Any = prophet.prophetnet
_a : int = prophet_old.model
_a : List[Any] = False
for attribute in attributes:
if attribute in mapping:
_a : Any = mapping[attribute]
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0:
_a : str = attribute
elif hasattr(UpperCamelCase__ , UpperCamelCase__ ):
_a : str = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_a : Tuple = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
_a : Tuple = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_a : Union[str, Any] = old_model.bias
logger.info(F"""{attribute} is initialized""" )
_a : Optional[int] = True
break
elif attribute in special_keys and hasattr(UpperCamelCase__ , """in_proj_weight""" ):
_a : Optional[int] = old_model.in_proj_weight.shape[0] // 3
_a : List[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_a : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_a : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_a : Dict = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_a : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_a : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_a : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_a : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
_a : List[Any] = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
_a : Dict = True
break
if attribute.isdigit():
_a : Optional[Any] = model[int(UpperCamelCase__ )]
_a : Dict = old_model[int(UpperCamelCase__ )]
else:
_a : List[Any] = getattr(UpperCamelCase__ , UpperCamelCase__ )
if old_attribute == "":
_a : List[Any] = old_model
else:
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
_a : List[str] = getattr(UpperCamelCase__ , UpperCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(UpperCamelCase__ )
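# Hedged illustration of the remapping idea in the loop above: each dotted
# attribute path is walked piece by piece, translating new-style names back to
# the old checkpoint's names (toy mapping and key, runs as-is):
_toy_mapping = {"self_attn": "ngram_self_attn", "key_proj": "k_proj"}
_toy_key = "decoder.layers.0.self_attn.key_proj.weight"
assert ".".join(_toy_mapping.get(p, p) for p in _toy_key.split(".")) == (
    "decoder.layers.0.ngram_self_attn.k_proj.weight"
)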
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 324 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ = 1_6 ):
'''simple docstring'''
_a : str = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_a : Dict = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : Tuple = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Union[str, Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : int = 1_6
elif accelerator.mixed_precision != "no":
_a : int = 8
else:
_a : str = None
return tokenizer.pad(
UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_a : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_a : List[str] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
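# Hedged illustration of `pad_to_multiple_of` from the collate_fn above: a
# batch whose longest sequence has 13 tokens is padded up to 16 when the
# multiple is 8, which keeps tensor shapes friendly to fp16/fp8 kernels.
import math

assert math.ceil(13 / 8) * 8 == 16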
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase__ ) == "1":
_a : str = 2
# Initialize accelerator
_a : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Any = config["""lr"""]
_a : Union[str, Any] = int(config["""num_epochs"""] )
_a : str = int(config["""seed"""] )
_a : List[Any] = int(config["""batch_size"""] )
_a : Tuple = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_a : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_a : str = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase__ )
_a , _a : Optional[int] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[str] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_a : List[str] = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Optional[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Optional[Any] = model(**UpperCamelCase__ )
_a : str = outputs.loss
_a : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
_a : Union[str, Any] = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Dict = model(**UpperCamelCase__ )
_a : Optional[Any] = outputs.logits.argmax(dim=-1 )
_a , _a : int = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(UpperCamelCase__ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
_a : str = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a : int = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_a : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , UpperCamelCase__ )
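# As the comment in the eval loop notes, the manual truncation can be replaced
# by `Accelerator.gather_for_metrics`, which drops the duplicated samples added
# by distributed padding automatically. Hedged sketch of that variant, as it
# would appear inside the eval loop:
#
#     predictions, references = accelerator.gather_for_metrics(
#         (predictions, batch["labels"])
#     )
#     metric.add_batch(predictions=predictions, references=references)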
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_a : Optional[Any] = parser.parse_args()
_a : Tuple = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 324 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : Any = PriorTransformer
snake_case__ : List[Any] = """hidden_states"""
@property
def _A ( self : List[Any] ):
UpperCamelCase :List[str] = 4
UpperCamelCase :Optional[int] = 8
UpperCamelCase :Dict = 7
UpperCamelCase :Tuple = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :int = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A ( self : Optional[int] , __lowerCamelCase : int=0 ):
torch.manual_seed(__lowerCamelCase )
UpperCamelCase :Dict = 4
UpperCamelCase :Optional[Any] = 8
UpperCamelCase :List[Any] = 7
UpperCamelCase :str = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :int = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _A ( self : Optional[int] ):
return (4, 8)
@property
def _A ( self : Optional[int] ):
return (4, 8)
def _A ( self : Optional[int] ):
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
UpperCamelCase :int = self.dummy_input
return init_dict, inputs_dict
def _A ( self : Dict ):
        model , loading_info = PriorTransformer.from_pretrained(
            """hf-internal-testing/prior-dummy""" , output_loading_info=True )
        self.assertIsNotNone(model )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _A ( self : int ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["""hidden_states""", """timestep"""]
        self.assertListEqual(arg_names[:2] , expected_arg_names )
def _A ( self : Optional[int] ):
UpperCamelCase :Optional[int] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
UpperCamelCase :Optional[Any] = model.to(__lowerCamelCase )
if hasattr(__lowerCamelCase , """set_default_attn_processor""" ):
model.set_default_attn_processor()
UpperCamelCase :Dict = self.get_dummy_seed_input()
with torch.no_grad():
UpperCamelCase :Dict = model(**__lowerCamelCase )[0]
UpperCamelCase :int = output[0, :5].flatten().cpu()
print(__lowerCamelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCamelCase :Optional[int] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(__lowerCamelCase , __lowerCamelCase , rtol=1E-2 ) )
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int , __lowerCamelCase : Tuple=1 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Union[str, Any]=77 , __lowerCamelCase : Union[str, Any]=0 ):
torch.manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = batch_size
UpperCamelCase :Tuple = embedding_dim
UpperCamelCase :Optional[int] = num_embeddings
UpperCamelCase :List[Any] = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :List[Any] = torch.randn((batch_size, embedding_dim) ).to(__lowerCamelCase )
UpperCamelCase :Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCamelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _A ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _A ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Dict ):
UpperCamelCase :Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(__lowerCamelCase )
UpperCamelCase :Any = self.get_dummy_seed_input(seed=__lowerCamelCase )
with torch.no_grad():
UpperCamelCase :Optional[int] = model(**__lowerCamelCase )[0]
assert list(sample.shape ) == [1, 768]
UpperCamelCase :Dict = sample[0, :8].flatten().cpu()
print(__lowerCamelCase )
UpperCamelCase :List[str] = torch.tensor(__lowerCamelCase )
assert torch_all_close(__lowerCamelCase , __lowerCamelCase , atol=1E-3 )
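# A short, hedged sketch of exercising the prior outside the test harness,
# mirroring the dummy checkpoint and input shapes used in the tests above:
def run_dummy_prior():
    model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
    model.eval()
    batch_size, embedding_dim, num_embeddings = 4, 8, 7
    hidden_states = torch.randn(batch_size, embedding_dim)
    proj_embedding = torch.randn(batch_size, embedding_dim)
    encoder_hidden_states = torch.randn(batch_size, num_embeddings, embedding_dim)
    with torch.no_grad():
        # the first output is the predicted (batch_size, embedding_dim) embedding
        sample = model(
            hidden_states,
            timestep=2,
            proj_embedding=proj_embedding,
            encoder_hidden_states=encoder_hidden_states,
        )[0]
    return sample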
| 38 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase__ ( _A : List[Any] ):
'''simple docstring'''
if "resnet-50" in model_name:
a__ =ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
a__ =ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
a__ =DetrConfig(use_timm_backbone=_A , backbone_config=_A )
# set label attributes
a__ ='''panoptic''' in model_name
if is_panoptic:
a__ =2_50
else:
a__ =91
a__ ='''huggingface/label-files'''
a__ ='''coco-detection-id2label.json'''
a__ =json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    a__ ={int(k ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCAmelCase__ ( _A : Optional[int] ):
'''simple docstring'''
a__ =[]
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def UpperCAmelCase__ ( _A : str , _A : Union[str, Any] , _A : int ):
'''simple docstring'''
a__ =state_dict.pop(_A )
a__ =val
def UpperCAmelCase__ ( _A : Dict , _A : str=False ):
'''simple docstring'''
a__ =''''''
if is_panoptic:
a__ ='''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a__ =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a__ =in_proj_weight[:2_56, :]
a__ =in_proj_bias[:2_56]
a__ =in_proj_weight[2_56:5_12, :]
a__ =in_proj_bias[2_56:5_12]
a__ =in_proj_weight[-2_56:, :]
a__ =in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
a__ =in_proj_weight[:2_56, :]
a__ =in_proj_bias[:2_56]
a__ =in_proj_weight[2_56:5_12, :]
a__ =in_proj_bias[2_56:5_12]
a__ =in_proj_weight[-2_56:, :]
a__ =in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
a__ =state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
a__ =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a__ =in_proj_weight_cross_attn[:2_56, :]
a__ =in_proj_bias_cross_attn[:2_56]
a__ =in_proj_weight_cross_attn[2_56:5_12, :]
a__ =in_proj_bias_cross_attn[2_56:5_12]
a__ =in_proj_weight_cross_attn[-2_56:, :]
a__ =in_proj_bias_cross_attn[-2_56:]
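# A small sketch of the split performed above: PyTorch's MultiheadAttention
# stores queries, keys and values as a single fused (3 * d, d) `in_proj_weight`
# (plus a 3 * d bias), while the HF implementation keeps three separate (d, d)
# projections, so the fused tensors are simply sliced into thirds (d = 256):
def split_in_proj(in_proj_weight, in_proj_bias, d=256):
    query = (in_proj_weight[:d, :], in_proj_bias[:d])
    key = (in_proj_weight[d : 2 * d, :], in_proj_bias[d : 2 * d])
    value = (in_proj_weight[2 * d :, :], in_proj_bias[2 * d :])
    return query, key, value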
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
a__ =Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( _A : Dict , _A : Optional[Any]=None , _A : List[str]=False ):
'''simple docstring'''
a__, a__ =get_detr_config(_A )
# load original model from torch hub
a__ ={
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(F"""Converting model {model_name}...""" )
a__ =torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=_A ).eval()
a__ =detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_A ):
if is_panoptic:
a__ ='''detr.''' + src
rename_key(_A , _A , _A )
# query, key and value matrices need special treatment
read_in_q_k_v(_A , is_panoptic=_A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a__ ='''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
a__ =state_dict.pop(_A )
a__ =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
a__ =state_dict.pop(_A )
a__ =val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
a__ =state_dict.pop(_A )
a__ =val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
a__ =state_dict.pop(_A )
a__ =val
# finally, create HuggingFace model and load state dict
a__ =DetrForSegmentation(_A ) if is_panoptic else DetrForObjectDetection(_A )
model.load_state_dict(_A )
model.eval()
# verify our conversion on an image
a__ ='''coco_panoptic''' if is_panoptic else '''coco_detection'''
a__ =DetrImageProcessor(format=_A )
a__ =processor(images=prepare_img() , return_tensors='''pt''' )
a__ =encoding['''pixel_values''']
a__ =detr(_A )
a__ =model(_A )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
processor.save_pretrained(_A )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
lowerCamelCase = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
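# A hedged usage sketch once a checkpoint has been converted and pushed; the
# repo id follows the `nielsr/{model_name}` pattern above but is illustrative:
# processor = DetrImageProcessor.from_pretrained("nielsr/detr-resnet-50")
# model = DetrForObjectDetection.from_pretrained("nielsr/detr-resnet-50")
# image = prepare_img()
# inputs = processor(images=image, return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# results = processor.post_process_object_detection(
#     outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
# )[0]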
| 188 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class a ( a__ ):
snake_case__ = '''encodec'''
def __init__( self , _snake_case=[1.5, 3.0, 6.0, 12.0, 24.0] , _snake_case=2_40_00 , _snake_case=1 , _snake_case=False , _snake_case=None , _snake_case=None , _snake_case=1_28 , _snake_case=32 , _snake_case=1 , _snake_case=[8, 5, 4, 2] , _snake_case="weight_norm" , _snake_case=7 , _snake_case=7 , _snake_case=3 , _snake_case=2 , _snake_case=True , _snake_case="reflect" , _snake_case=2 , _snake_case=2 , _snake_case=1.0 , _snake_case=10_24 , _snake_case=None , _snake_case=True , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = target_bandwidths
lowerCAmelCase = sampling_rate
lowerCAmelCase = audio_channels
lowerCAmelCase = normalize
lowerCAmelCase = chunk_length_s
lowerCAmelCase = overlap
lowerCAmelCase = hidden_size
lowerCAmelCase = num_filters
lowerCAmelCase = num_residual_layers
lowerCAmelCase = upsampling_ratios
lowerCAmelCase = norm_type
lowerCAmelCase = kernel_size
lowerCAmelCase = last_kernel_size
lowerCAmelCase = residual_kernel_size
lowerCAmelCase = dilation_growth_rate
lowerCAmelCase = use_causal_conv
lowerCAmelCase = pad_mode
lowerCAmelCase = compress
lowerCAmelCase = num_lstm_layers
lowerCAmelCase = trim_right_ratio
lowerCAmelCase = codebook_size
lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**_snake_case )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
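# Worked sketch of the derived quantities above for the 24 kHz defaults:
# the hop length is prod([8, 5, 4, 2]) = 320, so the frame rate is
# ceil(24_000 / 320) = 75 frames per second, and the largest target
# bandwidth (24.0 kbps) yields int(1_000 * 24.0 // (75 * 10)) = 32
# quantizers per frame.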
| 309 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
lowerCAmelCase = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
lowerCAmelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_snake_case , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
lowerCAmelCase = token_index
writer.write(token + '\n' )
index += 1
lowerCAmelCase = os.path.join(_snake_case , 'sentencepiece.bpe.model' )
with open(_snake_case , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (vocab_file,)
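# A hedged usage sketch of the tokenizer defined above (published upstream as
# ErnieMTokenizer; the checkpoint id mirrors the URL maps at the top of the file):
# tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
# print(tokenizer.tokenize("a short example sentence"))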
| 309 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
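    # A quick sanity check of `new_generation`: the vertical blinker defined
    # above flips to a horizontal bar after a single step.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]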
| 319 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a : Optional[int] = logging.get_logger(__name__)
__a : Tuple = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Dict = '''data2vec-text'''
def __init__( self , lowerCAmelCase__=3_05_22 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = classifier_dropout
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
            ] )
| 210 | 0 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> int:
super().__init__()
__SCREAMING_SNAKE_CASE = nn.ModuleList(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Union[torch.Tensor, float, int] , UpperCAmelCase__ : torch.Tensor , UpperCAmelCase__ : List[torch.tensor] , UpperCAmelCase__ : List[float] , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[torch.Tensor] = None , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(UpperCAmelCase__ , UpperCAmelCase__ , self.nets ) ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = controlnet(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , )
# merge samples
if i == 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_samples, mid_sample
else:
__SCREAMING_SNAKE_CASE = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCAmelCase__ , UpperCAmelCase__ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Callable = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[str] = None , ) -> int:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCAmelCase__ , is_main_process=UpperCAmelCase__ , save_function=UpperCAmelCase__ , safe_serialization=UpperCAmelCase__ , variant=UpperCAmelCase__ , )
idx += 1
__SCREAMING_SNAKE_CASE = model_path_to_save + F"""_{idx}"""
@classmethod
def UpperCAmelCase_ ( cls : List[str] , UpperCAmelCase__ : Optional[Union[str, os.PathLike]] , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__SCREAMING_SNAKE_CASE = pretrained_model_path
while os.path.isdir(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
controlnets.append(UpperCAmelCase__ )
idx += 1
__SCREAMING_SNAKE_CASE = pretrained_model_path + F"""_{idx}"""
logger.info(F"""{len(UpperCAmelCase__ )} controlnets loaded from {pretrained_model_path}.""" )
if len(UpperCAmelCase__ ) == 0:
raise ValueError(
F"""No ControlNets found under {os.path.dirname(UpperCAmelCase__ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(UpperCAmelCase__ )
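# A hedged assembly sketch for the wrapper above (published upstream as
# MultiControlNetModel). The checkpoint ids are real community ControlNets
# but serve only as illustration:
# controlnets = [
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
# ]
# multi_controlnet = MultiControlNetModel(controlnets)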
| 195 |
"""simple docstring"""
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main() -> None:
    '''simple docstring'''
    message = input("Enter message: " )
    key = input("Enter key [alphanumeric]: " )
    mode = input("Encrypt/Decrypt [e/d]: " )
    if mode.lower().startswith("e" ):
        mode = "encrypt"
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        mode = "decrypt"
        translated = decrypt_message(key , message )
    print(f"""\n{mode.title()}ed message:""" )
    print(translated )
def encrypt_message(key: str , message: str ) -> str:
    '''simple docstring'''
    return translate_message(key , message , "encrypt" )
def decrypt_message(key: str , message: str ) -> str:
    '''simple docstring'''
    return translate_message(key , message , "decrypt" )
def translate_message(key: str , message: str , mode: str ) -> str:
    '''simple docstring'''
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
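# A classic round-trip check of the functions above:
# >>> encrypt_message("LEMON", "ATTACKATDAWN")
# 'LXFOPVEFRNHR'
# >>> decrypt_message("LEMON", "LXFOPVEFRNHR")
# 'ATTACKATDAWN'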
| 195 | 1 |
from collections import Counter
from timeit import timeit
def __A ( __lowerCAmelCase = "" , )-> bool:
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def __A ( __lowerCAmelCase = "" )-> bool:
"""simple docstring"""
if len(__lowerCAmelCase ) == 0:
return True
    lower_case_input_str = input_str.replace(' ' , '' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __A ( __lowerCAmelCase = "" )-> None:
"""simple docstring"""
print('\nFor string = ' , __lowerCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
    check_str = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
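# Two quick examples of the checks above: "Momo" has only even character
# counts, while every character of "abcd" has an odd count.
# >>> can_string_be_rearranged_as_palindrome_counter("Momo")
# True
# >>> can_string_be_rearranged_as_palindrome("abcd")
# False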
| 39 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
UpperCamelCase__ = ["image_processor", "tokenizer"]
UpperCamelCase__ = "Pix2StructImageProcessor"
UpperCamelCase__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __call__( self , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 2048 , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , **UpperCAmelCase )
else:
# add pixel_values and bbox
_UpperCAmelCase = self.image_processor(
UpperCAmelCase , return_tensors=UpperCAmelCase , max_patches=UpperCAmelCase , header_text=UpperCAmelCase , **UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_UpperCAmelCase = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if "attention_mask" in text_encoding:
_UpperCAmelCase = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_UpperCAmelCase = text_encoding.pop('input_ids' )
else:
_UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCamelCase ( self , *UpperCAmelCase , **UpperCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
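# A hedged usage sketch of the processor above (published upstream as
# Pix2StructProcessor; the checkpoint id is illustrative):
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
# inputs = processor(images=image, return_tensors="pt", max_patches=2048)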
| 39 | 1 |
def add(first: int, second: int) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : List[Any] = int(input('''Enter the first number: ''').strip())
lowercase : List[str] = int(input('''Enter the second number: ''').strip())
print(F"""{add(first, second) = }""")
| 362 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 36 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : List[Any] =IFInpaintingPipeline
lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self):
return self._get_dummy_components()
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 101 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : List[Any] =IFInpaintingPipeline
lowercase_ : Optional[int] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
lowercase_ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ : str =PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self):
return self._get_dummy_components()
def A__ ( self ,A__ ,A__=0):
if str(A__).startswith('''mps'''):
lowercase = torch.manual_seed(A__)
else:
lowercase = torch.Generator(device=A__).manual_seed(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(A__)).to(A__)
lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def A__ ( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def A__ ( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
def A__ ( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1)
def A__ ( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def A__ ( self):
self._test_save_load_local()
def A__ ( self):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 ,)
| 101 | 1 |
"""simple docstring"""
__all__ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 340 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class A_ :
lowerCAmelCase__ = 'dummy_data'
lowerCAmelCase__ = 'datasets'
lowerCAmelCase__ = False
def __init__( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: str ,__lowerCAmelCase: Union[Version, str] ,__lowerCAmelCase: Optional[str] = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[List[Callable]] = None ,):
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = dataset_name
_lowerCamelCase : Optional[int] = cache_dir
_lowerCamelCase : Optional[int] = use_local_dummy_data
_lowerCamelCase : int = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : Tuple = str(__lowerCAmelCase )
# to be downloaded
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Dict = None
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self._dummy_file is None:
_lowerCamelCase : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _lowercase ( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
                _lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
    def create_dummy_data_list( self: int, path_to_dummy_data: List[str], data_url: Tuple ):
        '''simple docstring'''
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single( self: Union[str, Any], path_to_dummy_data: Optional[Any], data_url: List[Any] ):
        '''simple docstring'''
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self: Optional[Any] ):
        '''simple docstring'''
        pass
    def manage_extracted_files( self: Optional[int] ):
        '''simple docstring'''
        pass
    def iter_archive( self: List[Any], path: Optional[int] ):
        '''simple docstring'''
        def _iter_archive_members(path: Any):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)
        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files( self: str, paths: Optional[int] ):
        '''simple docstring'''
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
 | 340 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key: str, default: bool = False ) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
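# Usage note (not part of the original module): slow tests stay skipped unless the
# environment opts in, e.g. `RUN_SLOW=yes python -m pytest tests/` flips the flag above.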
def skip( test_case: Tuple ) -> List[Any]:
    return unittest.skip('''Test was skipped''' )(test_case)
def slow( test_case: Union[str, Any] ) -> Dict:
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case)
def require_cpu( test_case: str ) -> Tuple:
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case)
def require_cuda( test_case: Tuple ) -> Optional[int]:
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case)
def require_xpu( test_case: str ) -> Union[str, Any]:
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case)
def require_mps( test_case: Dict ) -> List[Any]:
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case)
def require_huggingface_suite( test_case: Union[str, Any] ) -> Dict:
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case)
def require_bnb( test_case: Optional[Any] ) -> List[Any]:
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case)
def require_tpu( test_case: Tuple ) -> Optional[int]:
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case)
def require_single_gpu( test_case: List[Any] ) -> Union[str, Any]:
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case)
def require_single_xpu( test_case: Dict ) -> Optional[Any]:
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case)
def require_multi_gpu( test_case: Dict ) -> Tuple:
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case)
def require_multi_xpu( test_case: Union[str, Any] ) -> Tuple:
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case)
def require_safetensors( test_case: List[Any] ) -> Dict:
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case)
def require_deepspeed( test_case: List[str] ) -> Any:
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case)
def require_fsdp( test_case: Optional[int] ) -> Tuple:
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case)
def require_torch_min_version( test_case=None , version=None ) -> int:
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f"""test requires torch version >= {version}""" )(test_case)
def require_tensorboard( test_case: int ) -> Tuple:
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case)
def require_wandb( test_case: str ) -> Optional[Any]:
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case)
def require_comet_ml( test_case: str ) -> Dict:
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers( test_case: int ) -> Tuple:
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case)
class TempDirTestCase( unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def setUpClass(cls: int ):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls: List[str] ):
        """simple docstring"""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self: str ):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('''**/*''' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase( unittest.TestCase ):
    def tearDown(self: Optional[Any] ):
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase( unittest.TestCase ):
    def add_mocks(self: Optional[int] , mocks: Union[mock.Mock, List[mock.Mock]] ):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors( tensor: int ) -> Dict:
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self: Tuple , returncode: int , stdout: Dict , stderr: Optional[int] ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream: Tuple , callback: Tuple ) -> Optional[Any]:
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd: List[Any] , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line: Dict , sink: str , pipe: str , label: Dict="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd: Tuple , env=None , stdin=None , timeout: Optional[int]=180 , quiet: Optional[int]=False , echo: str=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    return result
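# A minimal usage sketch (illustrative; the script path is hypothetical): run a
# worker script in a subprocess from a test and fail loudly on a non-zero exit.
#
#     execute_subprocess_async(
#         [sys.executable, "examples/dummy_script.py", "--steps", "1"],
#         env=os.environ.copy(),
#     )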
class SubprocessCallException( Exception ):
    pass
def run_command( command: List[str] , return_stdout: List[str]=False ) -> Tuple:
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 24 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16(self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
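    # Usage note (assumed, not part of the original file): once collected by
    # pytest/unittest, a single check can be run in isolation, e.g.
    #   pytest path/to/this_test_file.py -k test_save_load_local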
| 174 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
    url = f'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
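# Usage sketch (assumed invocation): `python this_script.py machine learning` opens
# the top Google hit for "machine learning" in the default browser; with no
# arguments the script prompts for a query on stdin instead.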
| 186 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self: List[str] , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |  7   |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self: List[str] ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self: str , d: Any ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self: Optional[Any] ):
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self: Tuple ):
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self: Dict , new_src_lang: str ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self: Optional[int] , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self: Any , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self: Optional[int] , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self: List[str] , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def get_vocab( self: List[str] ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self: Any , text: str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self: Union[str, Any] , token: Tuple ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self: Tuple , index: List[str] ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self: List[str] , tokens: Dict ):
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self: Any , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def lowerCamelCase__ ( self : Any , A : List[str] , A : str = "eng_Latn" , A : Optional[List[str]] = None , A : str = "fra_Latn" , **A : Optional[int] , ):
'''simple docstring'''
a : Union[str, Any] = src_lang
a : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ ( self : Union[str, Any] , A : Dict ):
'''simple docstring'''
a : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
a : List[Any] = []
a : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
a : Union[str, Any] = [self.cur_lang_code]
a : List[str] = [self.eos_token_id]
def lowerCamelCase__ ( self : Optional[Any] , A : str ):
'''simple docstring'''
a : Tuple = self.lang_code_to_id[lang]
if self.legacy_behaviour:
a : List[str] = []
a : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
a : Union[str, Any] = [self.cur_lang_code]
a : List[str] = [self.eos_token_id]
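# Hedged usage sketch (downloads the public checkpoint; illustrative only):
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#     # by default the source language code is prepended and </s> appended; with
#     # legacy_behaviour=True the code instead follows the final </s> (see above).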
| 186 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = "deberta-v2"
    def __init__( self , vocab_size=128_100 , hidden_size=1_536 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=6_144 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1e-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , tokenizer = None , ):
        """simple docstring"""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
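# Hedged usage sketch (illustrative): any of the defaults above can be overridden
# by keyword when building a config from scratch.
#
#     config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
#     onnx_config = DebertaV2OnnxConfig(config)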
 | 291 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self: Union[str, Any] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_full_tokenizer( self: Dict ):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained( self: List[Any] ):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_pp , key ) )
                shutil.rmtree(tmpdirname2 )
    @require_torch
    def test_prepare_seq2seq_batch( self: Tuple ):
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text , tgt_texts=tgt_text , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 10 )
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text , tgt_texts=tgt_text , max_length=3 , return_tensors="pt" )
                self.assertEqual(batch.input_ids.shape[1] , 3 )
                self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text , max_length=3 , max_target_length=10 , return_tensors="pt" )
                self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
                self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
                self.assertNotIn("decoder_input_ids" , batch_encoder_only )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def _A ( self : List[Any] ):
pass
def _A ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : Any = [AddedToken("<special>" , lstrip=A )]
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , additional_special_tokens=A , **A )
_UpperCAmelCase : Dict = tokenizer_r.encode("Hey this is a <special> token" )
_UpperCAmelCase : Any = tokenizer_r.encode("<special>" , add_special_tokens=A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
_UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
A , additional_special_tokens=A , **A , )
_UpperCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(
A , additional_special_tokens=A , **A )
_UpperCAmelCase : Union[str, Any] = tokenizer_p.encode("Hey this is a <special> token" )
_UpperCAmelCase : Any = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(A , A )
self.assertEqual(A , A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest (unittest.TestCase ):
    '''simple docstring'''
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256_047,
        16_297,
        134_408,
        8_165,
        248_066,
        14_734,
        950,
        1_135,
        105_721,
        3_573,
        83,
        27_352,
        108,
        49_486,
        2,
    ]
    @classmethod
    def setUpClass( cls: int ):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
        cls.pad_token_id = 1
        return cls
    def test_language_codes( self: Any ):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 256001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 256002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 256057 )
    def test_enro_tokenizer_batch_encode_plus( self: Union[str, Any] ):
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_enro_tokenizer_decode_ignores_language_codes( self: Tuple ):
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_enro_tokenizer_truncation( self: Optional[int] ):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , str )
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True ).input_ids[0]
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(ids[0] , EN_CODE )
        self.assertEqual(len(ids ) , desired_max_length )
    def test_mask_token( self: Dict ):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [256203, 3] )
    def test_special_tokens_unaffacted_by_save_load( self: Optional[Any] ):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = NllbTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
    @require_torch
    def test_enro_tokenizer_prepare_batch( self: Dict ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    def test_seq2seq_max_length( self: str ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def test_tokenizer_translation( self: List[Any] ):
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            } , )
    @require_torch
    def test_legacy_behaviour( self: Any ):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
| 31 | 0 |
"""simple docstring"""
def euclidean_gcd( a , b ):
    while b:
        a , b = b , a % b
    return a
def euclidean_gcd_recursive( a , b ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
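# Worked example: euclidean_gcd(252, 105) -> gcd(105, 42) -> gcd(42, 21) -> gcd(21, 0) -> 21,
# and euclidean_gcd_recursive follows the same chain.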
def main( ):
print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 359 |
"""simple docstring"""
from collections import deque
class Process:
    """simple docstring"""
    def __init__( self: Any , process_name: str , arrival_time: int , burst_time: int ) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """simple docstring"""
    def __init__( self: Any , number_of_queues: int , time_slices: list[int] , queue: deque[Process] , current_time: int , ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self: Optional[int] ) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self: List[str] , queue: list[Process] ) -> list[int]:
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self: List[str] , queue: list[Process] ) -> list[int]:
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self: Dict , queue: list[Process] ) -> list[int]:
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self: str , queue: deque[Process] ) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time( self: int , process: Process ) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self: Optional[int] , ready_queue: deque[Process] ) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self: Tuple , ready_queue: deque[Process] , time_slice: int ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self: Optional[Any] ) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
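# Illustrative behaviour note (not part of the original file): with time_slices=[17, 25],
# every ready process first receives a 17-tick slice in queue 0; survivors then get a
# 25-tick slice in queue 1, and whatever still remains is drained FCFS in the last
# queue. With the P1..P4 workload below, P2 (burst 17) already finishes in the first
# round-robin pass.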
if __name__ == "__main__":
import doctest
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
snake_case_ = Process("""P1""", 0, 53)
snake_case_ = Process("""P2""", 0, 17)
snake_case_ = Process("""P3""", 0, 68)
snake_case_ = Process("""P4""", 0, 24)
snake_case_ = 3
snake_case_ = [17, 25]
snake_case_ = deque([Pa, Pa, Pa, Pa])
snake_case_ = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 181 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , feature_extractor , tokenizer ) -> List[Any]:
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ) -> List[Any]:
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> Union[str, Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> Tuple:
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> int:
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
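# Hedged usage sketch (assumes the public "laion/clap-htsat-unfused" checkpoint and a
# 48 kHz mono waveform as a NumPy array; illustrative only):
#
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(
#         text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt"
#     )
#     # -> BatchEncoding with input_ids, attention_mask and input_features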
| 11 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo( ctypes.Structure ):
        """simple docstring"""
        # _fields_ is the attribute name expected by ctypes.Structure
        _fields_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def hide_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""" )
        sys.stdout.flush()
def show_cursor( ) -> None:
    '''simple docstring'''
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""" )
        sys.stdout.flush()
@contextmanager
def hidden_cursor( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
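# Minimal usage sketch (the context-manager name `hidden_cursor` above is an assumed
# restoration; only hide_cursor/show_cursor are pinned by its body):
if __name__ == "__main__":
    import time
    with hidden_cursor():
        for _ in range(3):
            print(".", end="", flush=True)
            time.sleep(0.2)
    print()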
| 115 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
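# --- Added sketch (not in the original file): the same computation without the
# `datasets` wrapper, calling rouge_score directly the way `_compute` does above.
if __name__ == "__main__":
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
    print(demo_scorer.score("hello there", "hello there")["rouge1"].fmeasure)  # 1.0 on an exact match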
 | 361 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time",
            addition_time_embed_dim=8, transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_a = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_a = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
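# --- Added note (hypothetical paths): the fast tests above run on CPU with tiny
# randomly initialized components, e.g.
#   pytest tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py -k euler
# while the @slow integration tests require a GPU and are gated behind RUN_SLOW=1.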
| 165 | 0 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(self, target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0], sampling_rate=24_000, audio_channels=1, normalize=False, chunk_length_s=None, overlap=None, hidden_size=128, num_filters=32, num_residual_layers=1, upsampling_ratios=[8, 5, 4, 2], norm_type="weight_norm", kernel_size=7, last_kernel_size=7, residual_kernel_size=3, dilation_growth_rate=2, use_causal_conv=True, pad_mode="reflect", compress=2, num_lstm_layers=2, trim_right_ratio=1.0, codebook_size=1_024, codebook_dim=None, use_conv_shortcut=True, **kwargs):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
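# --- Added usage sketch (not in the original file): how the derived properties
# follow from the constructor arguments above.
# cfg = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
# cfg.chunk_length    # 24_000 samples at the default 24 kHz sampling rate
# cfg.frame_rate      # ceil(24_000 / prod([8, 5, 4, 2])) = 75 frames per second
# cfg.num_quantizers  # int(1_000 * 24.0 // (75 * 10)) = 32 codebooks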
| 309 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
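# --- Added sanity check (not in the original file): integrate y' = y on [0, 1];
# the explicit Euler estimate approaches e ≈ 2.71828 as step_size shrinks.
# y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
# y[-1]  # ≈ 2.7169, with O(step_size) global error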
| 309 | 1 |
import os
def solution():
    script_directory = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_directory, '''triangle.txt''')

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
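# --- Added illustration (hypothetical helper, not in the original file): the same
# bottom-up DP on an in-memory triangle, without the file I/O.
def _max_path_sum(triangle):
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            left = a[i - 1][j - 1] if j > 0 else 0
            right = a[i - 1][j] if j != len(a[i - 1]) else 0
            a[i][j] += max(left, right)
    return max(a[-1])

# _max_path_sum([[3], [7, 4], [2, 4, 6]]) == 14  # best path is 3 -> 7 -> 4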
| 360 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
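# --- Added minimal sketch (assumption: simplified stand-ins for DummyObject /
# requires_backends from ..utils) showing why the dummy exists: it fails fast
# with a clear ImportError instead of a confusing AttributeError later.
class _DemoDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch and scipy libraries.")

class _DemoScheduler(metaclass=_DemoDummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("_DemoScheduler requires the torch and scipy libraries.")

# _DemoScheduler.from_pretrained("x")  # -> ImportError with an actionable message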
| 179 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 237 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_center_crop=True, crop_size=None, do_flip_channel_order=True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        return flip_channel_order(image, data_format=data_format)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_center_crop=None, crop_size=None, do_flip_channel_order=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
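# --- Added usage sketch (not in the original file; inputs are illustrative):
# processor = MobileViTImageProcessor()
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # (1, 3, 256, 256) after the resize and center crop
# Note the channel flip above: the pretrained checkpoints expect BGR input.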
| 202 | 0 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
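# --- Added usage sketch (not in the original file): constructing the deprecated
# class still works but emits a FutureWarning pointing at the replacement.
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     DPTFeatureExtractor()
# assert any(issubclass(w.category, FutureWarning) for w in caught)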
| 359 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_a = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_a, wi_a)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
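# --- Added example invocation (all paths are placeholders, not from the original):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output \
#     --is_encoder_only   # only for encoder-only checkpoints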
| 178 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])

    metric = evaluate.load('''glue''', '''mrpc''')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['''labels''']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
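# --- Added launch examples (standard accelerate CLI; the script name is a placeholder):
# accelerate launch multi_process_metrics.py                         # uses your saved accelerate config
# accelerate launch multi_process_metrics.py --cpu                   # force CPU
# accelerate launch multi_process_metrics.py --mixed_precision fp16  # fp16 training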
| 56 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so gradients can flow through the image
    preprocessing (the stock processor converts to PIL and breaks autograd).
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    # NOTE: the trailing keyword names below are best-effort reconstructions;
    # only the parameters used in this excerpt are confirmed by the code.
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Adds a vector transform to the base latent and returns the resulting image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        """Makes edited images as the CLIP-guided optimization progresses."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
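# --- Added usage sketch (argument values are illustrative; checkpoints come from
# load_vqgan, and prompts use the "text:weight" syntax parsed by process_prompts):
# editor = VQGAN_CLIP(iterations=20, lr=0.05)
# for frame in editor.generate(
#     pos_prompts="a smiling face:1|bright eyes:0.5",
#     neg_prompts="blurry:1",
#     image_path="./input.png",
#     save_intermediate=True,
# ):
#     pass  # each yielded frame is a PIL image when return_val == "image"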
| 36 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        requires_backends(self, """torch""")

        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1_500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="""pt""")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values"""))
                    model_inputs["""image_embeddings"""] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 293 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
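
# A usage sketch of the trainer subclass above (dataset names and the post-processing
# callable are illustrative; `postprocess_qa_predictions` is assumed to turn raw
# start/end logits into answer strings):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=postprocess_qa_predictions,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()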
| 293 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join(str(s) for s in shape)}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
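
# These are slow integration tests gated behind the `@slow` decorator; they only run
# when explicitly enabled via the RUN_SLOW environment variable, e.g.:
#
#   RUN_SLOW=1 python -m pytest -k "FlaxUNet2DConditionModelIntegrationTests"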
| 313 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8,
                 n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1,
                 attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True,
                 use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False,
                 reorder_and_upcast_attn=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
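
# A minimal usage sketch of the config above (values are illustrative; `hidden_size`
# resolves to `n_embd` through `attribute_map`):
#
#   config = ImageGPTConfig(n_embd=512, n_layer=24, n_head=8)
#   assert config.hidden_size == 512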
| 313 | 1 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **saver_kwargs):
    '''simple docstring'''
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **saver_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
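
# Example invocation via python-fire (the script and file names are illustrative;
# positional arguments map onto pred_path and tgt_path):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json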
| 370 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
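
# A usage sketch of the function above (the agent name is illustrative): passing None
# falls back to the default prompts repo, a plain string containing whitespace is
# returned as-is, and anything else is treated as a dataset repo id to download from.
#
#   template = download_prompt(None, agent_name="MyAgent", mode="chat")
#   template = download_prompt("Translate <<task>> into French.", agent_name="MyAgent")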
| 242 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
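
# Example invocation (paths are illustrative):
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text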
| 186 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186 | 1 |
def xnor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    '''simple docstring'''
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
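
# Truth table of the gate above: XNOR is 1 exactly when both inputs agree.
#
#   input_1 | input_2 | output
#   --------+---------+-------
#      0    |    0    |   1
#      0    |    1    |   0
#      1    |    0    |   0
#      1    |    1    |   1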
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 350 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
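
# A minimal usage sketch of the 0-1 BFS above (vertex numbering is illustrative).
# Zero-weight edges are expanded from the front of the deque and one-weight edges
# from the back, which is what keeps the scan linear instead of O(E log V):
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   assert g.get_shortest_path(0, 2) == 1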
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 284 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    '''simple docstring'''
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    '''simple docstring'''
    long_filename = "a" * 1_0_0_0 + ".lock"
    lock1 = FileLock(str(tmpdir / long_filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(long_filename)
    assert len(os.path.basename(lock1._lock_file)) <= 2_5_5
    lock2 = FileLock(tmpdir / long_filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
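
# A minimal usage sketch of the lock under test (the path is illustrative):
#
#   with FileLock("/tmp/work.lock"):
#       ...  # critical section; a second process blocks here until release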
| 85 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : Dict = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_feature_extractor()
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : int = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : str = floats_list((3, 1_000) )
UpperCAmelCase__ : str = feature_extractor(_A , return_tensors='''np''' )
UpperCAmelCase__ : Optional[Any] = processor(_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : Any = self.get_decoder()
UpperCAmelCase__ : str = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Union[str, Any] = processor(text=_A )
UpperCAmelCase__ : Dict = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : str , _A : int=(2, 10, 16) , _A : Optional[int]=77 ):
'''simple docstring'''
np.random.seed(_A )
return np.random.rand(*_A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Dict = self.get_tokenizer()
UpperCAmelCase__ : Tuple = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Tuple = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase__ : Union[str, Any] = processor.decode(_A )
UpperCAmelCase__ : Union[str, Any] = decoder.decode_beams(_A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def lowercase_ ( self : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_feature_extractor()
UpperCAmelCase__ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase__ : Union[str, Any] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase__ : Union[str, Any] = processor.batch_decode(_A )
else:
with get_context(_A ).Pool() as pool:
UpperCAmelCase__ : List[Any] = processor.batch_decode(_A , _A )
UpperCAmelCase__ : List[str] = list(_A )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase__ : List[Any] = decoder.decode_beams_batch(_A , _A )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(_A , decoded_processor.logit_score )
self.assertListEqual(_A , decoded_processor.lm_score )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : List[Any] = self._get_dummy_logits()
UpperCAmelCase__ : Optional[Any] = 15
UpperCAmelCase__ : Union[str, Any] = -2_0.0
UpperCAmelCase__ : List[str] = -4.0
UpperCAmelCase__ : str = processor.batch_decode(
_A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Union[str, Any] = decoded_processor_out.text
UpperCAmelCase__ : Tuple = list(_A )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Tuple = decoder.decode_beams_batch(
_A , _A , beam_width=_A , beam_prune_logp=_A , token_min_logp=_A , )
UpperCAmelCase__ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase__ : Any = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase__ : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _A )
self.assertTrue(np.array_equal(_A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , _A , atol=1e-3 ) )
self.assertTrue(np.array_equal(_A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , _A , atol=1e-3 ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_feature_extractor()
UpperCAmelCase__ : str = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
UpperCAmelCase__ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = 2.0
UpperCAmelCase__ : Union[str, Any] = 5.0
UpperCAmelCase__ : Any = -2_0.0
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Any = processor.batch_decode(
_A , alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
UpperCAmelCase__ : List[Any] = decoded_processor_out.text
UpperCAmelCase__ : List[Any] = list(_A )
decoder.reset_params(
alpha=_A , beta=_A , unk_score_offset=_A , lm_score_boundary=_A , )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase__ : Any = decoder.decode_beams_batch(
_A , _A , )
UpperCAmelCase__ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_A , _A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _A )
UpperCAmelCase__ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Dict = os.listdir(_A )
UpperCAmelCase__ : str = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_A , _A )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(_A )
UpperCAmelCase__ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase__ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase__ : Optional[Any] = os.listdir(_A )
UpperCAmelCase__ : int = os.listdir(_A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Any = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase__ : Tuple = processor_wavaveca(_A , return_tensors='''np''' )
UpperCAmelCase__ : Union[str, Any] = processor_auto(_A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase__ : str = self._get_dummy_logits()
UpperCAmelCase__ : List[Any] = processor_wavaveca.batch_decode(_A )
UpperCAmelCase__ : List[Any] = processor_auto.batch_decode(_A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_feature_extractor()
UpperCAmelCase__ : Any = self.get_tokenizer()
UpperCAmelCase__ : List[str] = self.get_decoder()
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM(tokenizer=_A , feature_extractor=_A , decoder=_A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def lowercase_ ( _A : Tuple , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : Tuple = self._get_dummy_logits()[0]
UpperCAmelCase__ : int = processor.decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase__ : List[str] = self._get_dummy_logits()
UpperCAmelCase__ : Dict = processor.batch_decode(_A , output_word_offsets=_A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_A , _A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase_ ( self : str ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Dict = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_A )
UpperCAmelCase__ : Optional[Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase__ : Any = iter(_A )
UpperCAmelCase__ : Dict = next(_A )
UpperCAmelCase__ : Optional[int] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase__ : List[Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase__ : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A ).logits.cpu().numpy()
UpperCAmelCase__ : List[str] = processor.decode(logits[0] , output_word_offsets=_A )
UpperCAmelCase__ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase__ : List[str] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase__ : Tuple = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , _A )
self.assertEqual(''' '''.join(self.get_from_offsets(_A , '''word''' ) ) , output.text )
# output times
UpperCAmelCase__ : List[Any] = torch.tensor(self.get_from_offsets(_A , '''start_time''' ) )
UpperCAmelCase__ : Dict = torch.tensor(self.get_from_offsets(_A , '''end_time''' ) )
# fmt: off
UpperCAmelCase__ : Any = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
UpperCAmelCase__ : Any = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(_A , _A , atol=0.0_1 ) )
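
# A usage sketch of the processor under test (the checkpoint name is taken from the
# tests above; `logits` is assumed to be a numpy array of CTC logits produced by a
# compatible acoustic model):
#
#   processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#   transcription = processor.batch_decode(logits).text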
| 181 | 0 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """simple docstring"""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
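
# The three implementations above trade recomputation for memory: the plain recursion
# is exponential in `target`, while both the memoised (top-down) and the tabulated
# (bottom-up) variants run in O(target * len(array)) time with O(target) extra space.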
| 100 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 100 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 104 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """simple docstring"""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
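
# Example invocation (the output path is illustrative; the script name follows the
# transformers convention for this converter):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224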
| 231 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
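# Hypothetical invocation (the script file name is illustrative; the flags match
# the argparse definition below):
#   python convert_wavlm_checkpoint.py --checkpoint_path WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base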
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 352 |
def solution(n: int = 10_00) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 257 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 96 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via the (float-based) square root."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using exact integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
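# Quick sanity checks (illustrative values):
#   perfect_square(9) -> True
#   perfect_square(10) -> False
#   perfect_square_binary_search(625) -> True
#   perfect_square_binary_search(623) -> False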
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 360 |
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
| 71 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
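# Note: assigning `.data` replaces the weight tensor wholesale, so the returned
# layer effectively maps hidden states of size emb_size to vocab_size logits and
# shares its storage with the embedding (standard input/output embedding tying).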
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout,
        attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout,
        activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200,
        use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
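# Hypothetical invocation (script name illustrative; flags match the argparse below):
#   python convert_s2t_checkpoint.py --fairseq_path s2t_transformer.pt \
#       --pytorch_dump_folder_path ./s2t-converted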
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 263 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase :Optional[Any] = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 263 | 1 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """
    Gamma function for integer and half-integer input, computed with the
    recurrence gamma(num) = (num - 1) * gamma(num - 1).
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
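# Worked examples: for positive integers gamma(n) == (n - 1)!, so
#   gamma(5)   -> 24.0
#   gamma(0.5) -> sqrt(pi) ~ 1.7724538509055159
#   gamma(3.5) -> 2.5 * 1.5 * 0.5 * sqrt(pi) ~ 3.3233509704478426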
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''')
| 299 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text input into two dicts of counts: the frequency of single
    characters and the frequency of two-character sequences.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
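# Small worked example: analyze_text("abb") yields
#   single_char_strings == Counter({"b": 2, "a": 1})
#   two_char_strings    == Counter({" a": 1, "ab": 1, "bb": 1})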
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 299 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
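# Sketch of how this trainer is typically wired up in a QA script (names are
# illustrative, not defined in this file):
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_dataset=eval_dataset, eval_examples=raw_eval_examples,
#       post_process_function=post_processing_function, compute_metrics=compute_metrics,
#   )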
| 293 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Tuple = StableDiffusionXLImgaImgPipeline
__magic_name__ :List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__magic_name__ :Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
__magic_name__ :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ :str = IMAGE_TO_IMAGE_IMAGE_PARAMS
__magic_name__ :Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
lowerCAmelCase__ :str = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCAmelCase__ :str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase__ :str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
lowerCAmelCase__ :int = CLIPTextModel(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
lowerCAmelCase__ :Any = CLIPTextModelWithProjection(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__UpperCAmelCase )
lowerCAmelCase__ :str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = image / 2 + 0.5
if str(__UpperCAmelCase ).startswith('mps' ):
lowerCAmelCase__ :Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCAmelCase__ :Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ :int = self.get_dummy_components()
lowerCAmelCase__ :List[str] = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :str = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ :int = sd_pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def snake_case ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_dummy_components()
lowerCAmelCase__ :str = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
lowerCAmelCase__ :str = sd_pipe.to(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
# forward without prompt embeds
lowerCAmelCase__ :int = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = 3 * ['this is a negative prompt']
lowerCAmelCase__ :Tuple = negative_prompt
lowerCAmelCase__ :str = 3 * [inputs['prompt']]
lowerCAmelCase__ :Optional[Any] = sd_pipe(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCAmelCase__ :Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = 3 * ['this is a negative prompt']
lowerCAmelCase__ :str = 3 * [inputs.pop('prompt' )]
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) :List[str] = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
lowerCAmelCase__ :str = sd_pipe(
**__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , )
lowerCAmelCase__ :Optional[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCAmelCase__ :Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 6_4, 6_4) )
lowerCAmelCase__ :Optional[int] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
lowerCAmelCase__ :int = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.get_inputs(__UpperCAmelCase )
lowerCAmelCase__ :int = pipe(**__UpperCAmelCase ).images
lowerCAmelCase__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase__ :List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 293 | 1 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
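# Classic worked example (values/weights are illustrative):
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666...]): items 0 and 1 taken whole, 2/3 of item 2.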
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """
    Wrapper around `tqdm.tqdm` that optionally displays only on the local main process.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
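# Sketch of intended use inside an Accelerate training script (the exact import
# path may differ between library versions):
#   from accelerate.utils import tqdm
#   for batch in tqdm(dataloader):  # the bar renders only on the local main process
#       ...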
| 165 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=246_534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16,
        resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
        use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
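# Minimal sketch: a deliberately tiny configuration for experiments (values are
# illustrative, not the CTRL defaults above):
#   small_config = CTRLConfig(vocab_size=1_000, n_positions=128, n_embd=64,
#                             dff=256, n_layer=2, n_head=2)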
| 304 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(
        self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = "Validation size should be between 0 and " f"{len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
| 138 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "AutoTokenizer"
UpperCAmelCase__ : int = ["tokenizer"]
UpperCAmelCase__ : int = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , _a , _a=None ) -> Dict:
super().__init__(_a )
_a : List[Any] = speaker_embeddings
@classmethod
def __lowercase ( cls , _a , _a="speaker_embeddings_path.json" , **_a ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
_a : List[Any] = get_file_from_repo(
_a , _a , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_a , _a )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : Optional[int] = None
else:
with open(_a ) as speaker_embeddings_json:
_a : Tuple = json.load(_a )
else:
_a : Dict = None
_a : List[Any] = AutoTokenizer.from_pretrained(_a , **_a )
return cls(tokenizer=_a , speaker_embeddings=_a )
def __lowercase ( self , _a , _a="speaker_embeddings_path.json" , _a="speaker_embeddings" , _a = False , **_a , ) -> List[str]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_a , _a , '''v2''' ) , exist_ok=_a )
_a : Any = {}
_a : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : Union[str, Any] = self._load_voice_preset(_a )
_a : List[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , _a , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_a , )
_a : int = os.path.join(_a , F"""{prompt_key}_{key}.npy""" )
_a : Any = tmp_dict
with open(os.path.join(_a , _a ) , '''w''' ) as fp:
json.dump(_a , _a )
super().save_pretrained(_a , _a , **_a )
def __lowercase ( self , _a = None , **_a ) -> Optional[int]:
_a : Any = self.speaker_embeddings[voice_preset]
_a : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : Optional[int] = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_a : int = np.load(_a )
return voice_preset_dict
def __lowercase ( self , _a = None ) -> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , _a=None , _a=None , _a="pt" , _a=2_5_6 , _a=False , _a=True , _a=False , **_a , ) -> Dict:
if voice_preset is not None and not isinstance(_a , _a ):
if (
isinstance(_a , _a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : Optional[Any] = self._load_voice_preset(_a )
else:
if isinstance(_a , _a ) and not voice_preset.endswith('''.npz''' ):
_a : List[Any] = voice_preset + '''.npz'''
_a : Union[str, Any] = np.load(_a )
if voice_preset is not None:
self._validate_voice_preset_dict(_a , **_a )
_a : Dict = BatchFeature(data=_a , tensor_type=_a )
_a : List[str] = self.tokenizer(
_a , return_tensors=_a , padding='''max_length''' , max_length=_a , return_attention_mask=_a , return_token_type_ids=_a , add_special_tokens=_a , **_a , )
if voice_preset is not None:
_a : Optional[Any] = voice_preset
return encoded_text
| 15 |
from math import ceil
def solution(n: int = 1_001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
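# Sanity check: for a 5x5 number spiral the diagonal values are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and indeed solution(5) == 101.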
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
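# Illustrative renames produced by the rules above:
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.ln0.weight"     -> "rwkv.blocks.0.pre_ln.weight"
#   "blocks.2.att.key.weight" -> "rwkv.blocks.2.attention.key.weight"
#   "head.weight"             -> "head.weight"  (only key left without the "rwkv." prefix)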
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
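# A minimal sketch of the sharding step used above (illustrative only; this is
# not the transformers `shard_checkpoint` implementation): split a state dict
# into byte-bounded pieces and record which shard owns each key.
def _split_state_dict_sketch(state_dict, max_shard_size=2 * 1024**3):
    shards, index = [], {}
    current, current_size = {}, 0
    for key, tensor in state_dict.items():
        tensor_size = tensor.numel() * tensor.element_size()
        if current and current_size + tensor_size > max_shard_size:
            shards.append(current)
            current, current_size = {}, 0
        current[key] = tensor
        current_size += tensor_size
        index[key] = len(shards)  # which shard this key will land in
    if current:
        shards.append(current)
    return shards, index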
| 296 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
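# Quick, hypothetical illustration of the contract of floats_list: the first
# shape entry is the batch dimension, the second the per-item length, and all
# values fall in [0, scale).
def _floats_list_demo():
    sample = floats_list((2, 3), scale=10.0)
    assert len(sample) == 2 and len(sample[0]) == 3
    return sample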
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop("mel_filters")
            mel_second = dict_second.pop("mel_filters")
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
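# The serialization tests above all exercise one round-trip contract: whatever
# save_pretrained/to_json_file writes, from_pretrained/from_json_file must
# reconstruct, with `mel_filters` compared numerically rather than by equality.
# A standalone sketch of that round trip (the tmp path is illustrative):
def _roundtrip_sketch(tmp_dir="/tmp/tvlt_fe_demo"):
    fe = TvltFeatureExtractor()
    fe.save_pretrained(tmp_dir)
    fe_reloaded = TvltFeatureExtractor.from_pretrained(tmp_dir)
    return fe.to_dict().keys() == fe_reloaded.to_dict().keys()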
| 296 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
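# What make_linear_from_emb achieves: an nn.Embedding of shape (vocab, dim)
# and a bias-free output projection can share one weight matrix, so rebinding
# .data ties their parameters. Minimal check (sizes are arbitrary):
def _weight_tying_demo():
    emb = nn.Embedding(10, 4)
    lin = nn.Linear(4, 10, bias=False)  # weight shape (10, 4), same as emb
    lin.weight.data = emb.weight.data
    return lin.weight.data_ptr() == emb.weight.data_ptr()  # True: shared storage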
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
        num_words = len(words)
        vocab_dict = {
            "<s>": 0,
            "<pad>": 1,
            "</s>": 2,
            "<unk>": 3,
        }
        vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
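# create_vocab_dict maps fairseq's "<word> <count>" dict format onto ids
# 4..N+3, reserving 0-3 for the special tokens. Tiny in-memory example with
# made-up contents:
def _vocab_dict_demo():
    lines = ["hello 120", "world 95"]
    words = [line.split(" ")[0] for line in lines]
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    vocab.update(dict(zip(words, range(4, len(words) + 4))))
    return vocab  # {'<s>': 0, ..., 'hello': 4, 'world': 5}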
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)
    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 348 |
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
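# A slightly more defensive variant (a sketch, not part of the original
# script): check the HTTP status and bound the wait before trusting the JSON
# body. The 10-second timeout is an arbitrary choice.
def fetch_bbc_news_checked(bbc_news_api_key: str) -> None:
    response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    response.raise_for_status()  # surface 4xx/5xx instead of failing on .json()
    for i, article in enumerate(response.json()["articles"], 1):
        print(f"{i}.) {article['title']}")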
| 348 | 1 |
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1
    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]
    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
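# How the bitmask works for the example above (3 persons, so 3-bit masks):
# bit p of `mask` is 1 when person p already holds a task.
#     mask = 0b011        # persons 0 and 1 are busy
#     mask & (1 << 2)     # -> 0, person 2 is still free
#     mask | (1 << 2)     # -> 0b111, the state after assigning person 2
# The recursion counts an arrangement once mask == final_mask == 0b111, i.e.
# every person ended up with exactly one of the tasks 1..5.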
| 330 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
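# rename_state_dict_key folds substring swaps over a key in list order, so
# earlier patterns feed later ones. Worked example (the key itself is made up):
def _rename_demo():
    # "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight", "pegasus" -> "model"
    return rename_state_dict_key("pegasus/decoder/layer_0/fc1/kernel", DECODER_PATTERNS)
    # -> "model.decoder.layers.0.fc1.weight"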
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
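# Why the `.T` above: TF dense kernels are stored as (in_features, out_features)
# while torch.nn.Linear weights are (out_features, in_features). Sketch with
# arbitrary sizes:
def _transpose_demo():
    import numpy as np
    tf_kernel = np.zeros((768, 3072))           # hypothetical TF shape (in, out)
    pt_weight = torch.from_numpy(tf_kernel.T)   # torch shape (out, in)
    return pt_weight.shape                      # torch.Size([3072, 768])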
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 330 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
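# The _LazyModule above defers the heavy torch imports until an attribute is
# actually accessed. The same effect in miniature, via PEP 562's module-level
# __getattr__ (a standalone sketch, not the transformers implementation):
#
#     # mypkg/__init__.py
#     import importlib
#     _LAZY = {"CanineModel": ".modeling_canine"}
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(name)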
| 368 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, item), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))
    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)
    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
@slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)
    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)
    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))
    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)
    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 233 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 125 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
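# Worked check for n = 10: the sum of squares is 10*11*21/6 = 385, the square
# of the sum is (10*11/2)**2 = 55**2 = 3025, and the difference is
# 3025 - 385 = 2640, i.e. solution(10) == 2640.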
| 125 | 1 |
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
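# The same column-grafting trick in isolation: a datasets.Dataset column set
# is fixed, so a new column is attached by rebuilding from a plain dict
# (values here are made up):
def _add_column_demo():
    base = Dataset.from_dict({"text": ["a b", "c d"]})
    columns = {c: base[c] for c in base.column_names}
    columns["chinese_ref"] = [[1], [1]]
    return Dataset.from_dict(columns)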
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
SCREAMING_SNAKE_CASE : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if data_args.train_file is not None:
SCREAMING_SNAKE_CASE : Tuple = data_args.train_file
if data_args.validation_file is not None:
SCREAMING_SNAKE_CASE : Optional[int] = data_args.validation_file
SCREAMING_SNAKE_CASE : Optional[Any] = data_args.train_file.split("." )[-1]
if extension == "txt":
SCREAMING_SNAKE_CASE : List[str] = "text"
SCREAMING_SNAKE_CASE : Optional[int] = load_dataset(lowercase , data_files=lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE : List[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowercase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
SCREAMING_SNAKE_CASE : Any = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForMaskedLM.from_config(lowercase )
model.resize_token_embeddings(len(lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
SCREAMING_SNAKE_CASE : str = datasets["train"].column_names
else:
SCREAMING_SNAKE_CASE : List[str] = datasets["validation"].column_names
SCREAMING_SNAKE_CASE : List[str] = "text" if "text" in column_names else column_names[0]
SCREAMING_SNAKE_CASE : List[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(lowercase ):
# Remove empty lines
SCREAMING_SNAKE_CASE : Any = [line for line in examples["text"] if len(lowercase ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=lowercase , truncation=lowercase , max_length=data_args.max_seq_length )
SCREAMING_SNAKE_CASE : Tuple = datasets.map(
lowercase , batched=lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
SCREAMING_SNAKE_CASE : Tuple = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
SCREAMING_SNAKE_CASE : List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
SCREAMING_SNAKE_CASE : List[Any] = DataCollatorForWholeWordMask(tokenizer=lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
SCREAMING_SNAKE_CASE : List[Any] = Trainer(
model=lowercase , args=lowercase , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE : Tuple = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE : Dict = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Dict = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
SCREAMING_SNAKE_CASE : Dict = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(lowercase , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
SCREAMING_SNAKE_CASE : Optional[int] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE : str = trainer.evaluate()
SCREAMING_SNAKE_CASE : Dict = math.exp(eval_output["eval_loss"] )
SCREAMING_SNAKE_CASE : Optional[Any] = perplexity
SCREAMING_SNAKE_CASE : int = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 366 |
class UnionFind:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
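# Hypothetical usage of the structure above: four singleton sets merged down to
# one component, with max_set tracking the largest component seen so far.
def _union_find_demo():
    uf = UnionFind([1, 1, 1, 1])
    uf.merge(0, 1)   # component {0, 1}, size 2
    uf.merge(2, 3)   # component {2, 3}, size 2
    uf.merge(0, 2)   # everything joined, size 4
    return uf.max_set  # 4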
| 319 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase_ = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowerCAmelCase_ = '''▁'''
class snake_case_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[Any] = AlbertTokenizer
def __init__( self : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : str="[CLS]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Optional[int]="<pad>" , _UpperCamelCase : Tuple="[CLS]" , _UpperCamelCase : int="[MASK]" , **_UpperCamelCase : Tuple , ) ->Tuple:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
snake_case_ = (
AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase )
else mask_token
)
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__( self : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ):
copyfile(self.vocab_file , _UpperCamelCase )
        return (out_vocab_file,)
| 8 |
A_ :str = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
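# Minimal sketch of the canonical accelerate training loop built from the
# imports above; model, optimizer and dataloader are placeholders supplied by
# the caller, not part of this package.
def training_loop(model, optimizer, dataloader):
    accelerator = Accelerator()
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        optimizer.zero_grad()
        loss = model(**batch).loss
        accelerator.backward(loss)  # replaces loss.backward() for mixed precision / DDP
        optimizer.step()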
| 71 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
SCREAMING_SNAKE_CASE = "ssube/stable-diffusion-x4-upscaler-onnx"
def SCREAMING_SNAKE_CASE__ (self : str , __SCREAMING_SNAKE_CASE : int=0):
A = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__SCREAMING_SNAKE_CASE))
A = torch.manual_seed(__SCREAMING_SNAKE_CASE)
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = self.get_dummy_inputs()
A = pipe(**__SCREAMING_SNAKE_CASE).images
A = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def SCREAMING_SNAKE_CASE__ (self : int):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = self.get_dummy_inputs()
A = pipe(**__SCREAMING_SNAKE_CASE).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def SCREAMING_SNAKE_CASE__ (self : Any):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = self.get_dummy_inputs()
A = pipe(**__SCREAMING_SNAKE_CASE).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def SCREAMING_SNAKE_CASE__ (self : Optional[int]):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = self.get_dummy_inputs()
A = pipe(**__SCREAMING_SNAKE_CASE).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = self.get_dummy_inputs()
A = pipe(**__SCREAMING_SNAKE_CASE).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ (self : List[str]):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE__ (self : Dict):
A = ort.SessionOptions()
A = False
return options
def SCREAMING_SNAKE_CASE__ (self : Union[str, Any]):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
A = init_image.resize((1_2_8, 1_2_8))
# using the PNDM scheduler by default
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0)
A = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__SCREAMING_SNAKE_CASE , output_type="np" , )
A = output.images
A = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def SCREAMING_SNAKE_CASE__ (self : List[str]):
A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg")
A = init_image.resize((1_2_8, 1_2_8))
A = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler")
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE)
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0)
A = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__SCREAMING_SNAKE_CASE , output_type="np" , )
A = output.images
A = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
A = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
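# Illustrative sketch of the scheduler-swap pattern the tests above exercise:
# a new scheduler is rebuilt from the pipeline's existing scheduler config, so
# each scheduler imported at the top is a drop-in replacement for the default.
def swap_scheduler(pipe, scheduler_cls=DPMSolverMultistepScheduler):
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
    return pipe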
| 57 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : str = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
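# Minimal self-contained sketch of the same lazy-loading idea using PEP 562
# module-level __getattr__ (a simplification of what _LazyModule does): the
# heavy torch-backed module is only imported when a name is first accessed.
import importlib

def __getattr__(name):
    if name in ("PegasusXModel", "PegasusXForConditionalGeneration", "PegasusXPreTrainedModel"):
        return getattr(importlib.import_module(".modeling_pegasus_x", __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")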
| 57 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =MvpTokenizer
UpperCAmelCase_ =MvpTokenizerFast
UpperCAmelCase_ =True
UpperCAmelCase_ =filter_roberta_detectors
def _UpperCamelCase ( self ) -> Any:
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(_A , range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def _UpperCamelCase ( self , **_A ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , **_A ) -> Any:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return "lower newer", "lower newer"
@cached_property
def _UpperCamelCase ( self ) -> Tuple:
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def _UpperCamelCase ( self ) -> Optional[int]:
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
# Test that special tokens are reset
@require_torch
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_A , padding=_A , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''labels''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
@require_torch
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(text_target=_A , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def _UpperCamelCase ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_A , text_target=_A , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = inputs['''input_ids''']
SCREAMING_SNAKE_CASE_ = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
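# Illustrative sketch of the seq2seq tokenization API these tests cover
# (assumes network access to the RUCAIBox/mvp checkpoint):
tok = MvpTokenizer.from_pretrained('RUCAIBox/mvp')
batch = tok('A long paragraph for summarization.', text_target='Summary of the text.', return_tensors='pt')
# batch carries input_ids/attention_mask for the source and labels for the target
print(batch['input_ids'].shape, batch['labels'].shape)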
| 299 |
def infix_2_postfix( infix ):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix ) if (len(infix ) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8 ), 'Stack'.center(print_width ), 'Postfix'.center(print_width ), sep=' | ', )
    print('-' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack ) == 0:
                stack.append(x )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(x )  # push x to stack
        print(
            x.center(8 ), (''.join(stack )).ljust(print_width ), (''.join(post_fix )).ljust(print_width ), sep=' | ', )  # Output in tabular format
    while len(stack ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ' '.center(8 ), (''.join(stack )).ljust(print_width ), (''.join(post_fix )).ljust(print_width ), sep=' | ', )  # Output in tabular format
    return ''.join(post_fix )  # return Postfix as str
def infix_2_prefix( infix ):
    reversed_infix = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(reversed_infix ) ):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ')'  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(reversed_infix ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class A ( unittest.TestCase , ToolTesterMixin ):
    def setUp(self ):
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()
    def test_exact_match_arg(self ):
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
    def test_exact_match_kwarg(self ):
        torch.manual_seed(0 )
        result = self.tool(text='hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 356 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase = {'''UserAgent''': UserAgent().random}
def extract_user_profile( script ) -> dict:
    '''Pull the embedded profile JSON out of an Instagram page <script> tag.'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class A :
def __init__(self , lowerCAmelCase ):
__lowercase= f'https://www.instagram.com/{username}/'
__lowercase= self.get_json()
def _A (self ):
__lowercase= requests.get(self.url , headers=lowerCAmelCase ).text
__lowercase= BeautifulSoup(lowerCAmelCase , 'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__(self ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _A (self ):
return self.user_data["username"]
@property
def _A (self ):
return self.user_data["full_name"]
@property
def _A (self ):
return self.user_data["biography"]
@property
def _A (self ):
return self.user_data["business_email"]
@property
def _A (self ):
return self.user_data["external_url"]
@property
def _A (self ):
return self.user_data["edge_followed_by"]["count"]
@property
def _A (self ):
return self.user_data["edge_follow"]["count"]
@property
def _A (self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _A (self ):
return self.user_data["profile_pic_url_hd"]
@property
def _A (self ):
return self.user_data["is_verified"]
@property
def _A (self ):
return self.user_data["is_private"]
def _lowerCamelCase( lowercase__ = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
__lowercase= InstagramUser(lowercase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = InstagramUser('''github''')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__A = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__A = 128022
__A = 128028
@require_sentencepiece
class snake_case ( TokenizerTesterMixin, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = MaMaaaTokenizer
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : List[str] = True
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: List[str] = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__lowerCAmelCase: List[str] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__))))
__lowerCAmelCase: Optional[Any] = Path(self.tmpdirname)
save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"])
__lowerCAmelCase: Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self : Optional[int] , **UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : str)-> List[Any]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowercase_ ( self : Union[str, Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = "</s>"
__lowerCAmelCase: Dict = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__) , UpperCamelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__) , UpperCamelCase__)
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[str] = self.get_tokenizer()
__lowerCAmelCase: int = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , "</s>")
self.assertEqual(vocab_keys[1] , "<unk>")
self.assertEqual(vocab_keys[-1] , "<s>")
self.assertEqual(len(UpperCamelCase__) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip("Skip this test while all models are still to be uploaded.")
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.get_tokenizer()
__lowerCAmelCase: str = tokenizer.tokenize("This is a test")
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [2, 3, 4, 5, 6] , )
__lowerCAmelCase: Union[str, Any] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
__lowerCAmelCase: Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , "This is a test")
@slow
def lowercase_ ( self : Optional[int])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = """facebook/m2m100_418M"""
SCREAMING_SNAKE_CASE_ : List[str] = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
SCREAMING_SNAKE_CASE_ : Tuple = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
SCREAMING_SNAKE_CASE_ : Any = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def lowercase_ ( cls : int)-> str:
'''simple docstring'''
__lowerCAmelCase: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en" , tgt_lang="fr")
__lowerCAmelCase: Any = 1
return cls
def lowercase_ ( self : int)-> Optional[int]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("ar") , 1_2_8_0_0_6)
self.assertEqual(self.tokenizer.get_lang_id("en") , 1_2_8_0_2_2)
self.assertEqual(self.tokenizer.get_lang_id("ro") , 1_2_8_0_7_6)
self.assertEqual(self.tokenizer.get_lang_id("mr") , 1_2_8_0_6_3)
def lowercase_ ( self : int)-> int:
'''simple docstring'''
__lowerCAmelCase: str = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCamelCase__) , self.tokenizer.vocab_size)
self.assertEqual(vocab["<unk>"] , 3)
self.assertIn(self.tokenizer.get_lang_token("en") , UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[Any] = "en"
__lowerCAmelCase: Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__)
def lowercase_ ( self : str)-> Tuple:
'''simple docstring'''
self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids)
# fmt: off
__lowerCAmelCase: Optional[Any] = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
__lowerCAmelCase: Optional[Any] = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__)
__lowerCAmelCase: List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__)
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = tempfile.mkdtemp()
__lowerCAmelCase: Union[str, Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCamelCase__)
__lowerCAmelCase: Tuple = MaMaaaTokenizer.from_pretrained(UpperCamelCase__)
self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase__)
@require_torch
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = "en"
__lowerCAmelCase: str = "fr"
__lowerCAmelCase: Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="pt")
__lowerCAmelCase: Optional[Any] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
__lowerCAmelCase: Tuple = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowercase_ ( self : Dict)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
__lowerCAmelCase: List[Any] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowercase_ ( self : Optional[int])-> Any:
'''simple docstring'''
__lowerCAmelCase: int = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
__lowerCAmelCase: List[str] = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar")
self.assertEqual(
nested_simplify(UpperCamelCase__) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
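# Illustrative translation sketch matching the workflow these tests cover,
# written with the public class names rather than the renamed ones above
# (assumes the facebook/m2m100_418M checkpoint can be downloaded):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
encoded = tok("In my opinion, there are two levels of response.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tok.get_lang_id("fr"))
print(tok.batch_decode(generated, skip_special_tokens=True))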
| 217 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( TokenizerTesterMixin, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[int] = filter_non_english
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__lowerCAmelCase: List[Any] = {}
__lowerCAmelCase: Dict = {}
for i, value in enumerate(UpperCamelCase__):
__lowerCAmelCase: List[Any] = i
__lowerCAmelCase: Union[str, Any] = i
__lowerCAmelCase: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"])
__lowerCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__)
with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__)
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__lowerCAmelCase: Union[str, Any] = tokenizer.tokenize("你好[SEP]你是谁")
self.assertListEqual(UpperCamelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
__lowerCAmelCase: int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Optional[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def lowercase_ ( self : List[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__lowerCAmelCase: int = {}
for i, token in enumerate(UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = i
__lowerCAmelCase: str = RoCBertWordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def lowercase_ ( self : Optional[Any])-> Dict:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def lowercase_ ( self : Dict)-> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def lowercase_ ( self : Union[str, Any])-> str:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def lowercase_ ( self : Dict)-> int:
'''simple docstring'''
__lowerCAmelCase: Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase__) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
__lowerCAmelCase: Any = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase__) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__lowerCAmelCase: Tuple = tokenizer_r.encode_plus(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , )
__lowerCAmelCase: str = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ , "do_lower_case") else False
__lowerCAmelCase: List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = ["的", "人", "有"]
__lowerCAmelCase: int = "".join(UpperCamelCase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Tuple = True
__lowerCAmelCase: str = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Dict = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Any = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__)
__lowerCAmelCase: List[str] = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: int = False
__lowerCAmelCase: Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__)
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase: Dict = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCamelCase__)
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
@slow
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__lowerCAmelCase: Dict = tokenizer.encode("你好" , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer.encode("你是谁" , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: int = self.get_tokenizers(do_lower_case=UpperCamelCase__)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
__lowerCAmelCase: str = "你好,你是谁"
__lowerCAmelCase: Dict = tokenizer.tokenize(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__)
__lowerCAmelCase: Dict = tokenizer.prepare_for_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
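# Minimal sketch of RoCBERT's three parallel id sequences, which the tests
# above validate (the checkpoint name here is an assumption):
tok = RoCBertTokenizer.from_pretrained("weiweishi/roc-bert-base-zh")
tokens = tok.tokenize("你好")
ids = tok.convert_tokens_to_ids(tokens)                     # vocabulary ids
shape_ids = tok.convert_tokens_to_shape_ids(tokens)         # glyph-shape ids
pron_ids = tok.convert_tokens_to_pronunciation_ids(tokens)  # pronunciation ids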
| 217 | 1 |
'''simple docstring'''
from math import factorial
def solution( num : int = 100 ) -> int:
    '''Return the sum of the digits in the number num! (Project Euler problem 20).'''
    return sum(map(int , str(factorial(num ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
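# Worked check: 10! = 3628800, whose digits sum to 27; for the Project Euler
# problem 20 input of 100!, the digit sum is 648.
assert solution(10) == 27
assert solution(100) == 648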
| 156 |
'''Ford-Fulkerson (Edmonds-Karp) maximum flow on an adjacency-matrix graph.'''
def bfs( graph : list , source : int , sink : int , parent : list ) -> bool:
    '''Breadth-first search; records the augmenting path found in `parent`.'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson( graph : list , source : int , sink : int ) -> int:
    '''Compute the max flow from source to sink, mutating graph into the residual network.'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
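# Note: ford_fulkerson mutates `graph` in place, leaving the residual network
# behind, so a second call on the same matrix returns 0. For the classic CLRS
# capacity matrix above, the printed maximum flow is 23.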
| 156 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
class UpperCAmelCase ( ProcessorMixin ):
'''simple docstring'''
snake_case_ = "AutoTokenizer"
snake_case_ = ["tokenizer"]
snake_case_ = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self : Optional[Any] ,A : Any ,A : Union[str, Any]=None ):
super().__init__(A )
__A = speaker_embeddings
@classmethod
def UpperCamelCase_ ( cls : Any ,A : str ,A : List[str]="speaker_embeddings_path.json" ,**A : Optional[int] ):
if speaker_embeddings_dict_path is not None:
__A = get_file_from_repo(
A ,A ,subfolder=kwargs.pop("subfolder" ,A ) ,cache_dir=kwargs.pop("cache_dir" ,A ) ,force_download=kwargs.pop("force_download" ,A ) ,proxies=kwargs.pop("proxies" ,A ) ,resume_download=kwargs.pop("resume_download" ,A ) ,local_files_only=kwargs.pop("local_files_only" ,A ) ,use_auth_token=kwargs.pop("use_auth_token" ,A ) ,revision=kwargs.pop("revision" ,A ) ,)
if speaker_embeddings_path is None:
logger.warning(
                    f'''`{os.path.join(A ,A )}` does not exist,
                    no preloaded speaker embeddings will be used - make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
__A = None
else:
with open(A ) as speaker_embeddings_json:
__A = json.load(A )
else:
__A = None
__A = AutoTokenizer.from_pretrained(A ,**A )
return cls(tokenizer=A ,speaker_embeddings=A )
def UpperCamelCase_ ( self : Optional[Any] ,A : str ,A : int="speaker_embeddings_path.json" ,A : List[Any]="speaker_embeddings" ,A : bool = False ,**A : List[str] ,):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A ,A ,"v2" ) ,exist_ok=A )
__A = {}
__A = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__A = self._load_voice_preset(A )
__A = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] ,A ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=A ,)
__A = os.path.join(A ,f'''{prompt_key}_{key}.npy''' )
__A = tmp_dict
with open(os.path.join(A ,A ) ,"w" ) as fp:
json.dump(A ,A )
super().save_pretrained(A ,A ,**A )
def UpperCamelCase_ ( self : Dict ,A : str = None ,**A : Optional[int] ):
__A = self.speaker_embeddings[voice_preset]
__A = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
__A = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] ,subfolder=kwargs.pop("subfolder" ,A ) ,cache_dir=kwargs.pop("cache_dir" ,A ) ,force_download=kwargs.pop("force_download" ,A ) ,proxies=kwargs.pop("proxies" ,A ) ,resume_download=kwargs.pop("resume_download" ,A ) ,local_files_only=kwargs.pop("local_files_only" ,A ) ,use_auth_token=kwargs.pop("use_auth_token" ,A ) ,revision=kwargs.pop("revision" ,A ) ,)
if path is None:
raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exist,
                    no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
__A = np.load(A )
return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset=None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''')
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.''')
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding="max_length", max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
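

# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, standalone rendition of the voice-preset validation pattern the
# processor above uses. The PRESET_SHAPE values and the dummy arrays below are
# assumptions for illustration only, not a library API.
if __name__ == "__main__":
    import numpy as _np

    PRESET_SHAPE = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}

    def validate_voice_preset(voice_preset: dict) -> None:
        # Raise ValueError unless every required prompt is an ndarray of the right rank.
        for key, ndim in PRESET_SHAPE.items():
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
            if not isinstance(voice_preset[key], _np.ndarray) or voice_preset[key].ndim != ndim:
                raise ValueError(f"{key} voice preset must be a {ndim}D ndarray.")

    preset = {
        "semantic_prompt": _np.zeros(16, dtype=_np.int64),
        "coarse_prompt": _np.zeros((2, 16), dtype=_np.int64),
        "fine_prompt": _np.zeros((8, 16), dtype=_np.int64),
    }
    validate_voice_preset(preset)  # passes silently for correctly shaped arrays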
| 15 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """simple docstring"""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f'''This example requires a minimum version of {min_version},'''
        error_message += f''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 15 | 1 |
"""simple docstring"""
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = "<HTML>\n\n    <HEAD>\n    <TITLE>sample document</TITLE>\n    </HEAD>\n\n    <BODY BGCOLOR=\"FFFFFF\">\n    <HR>\n    <a href=\"http://google.com\">Goog</a>\n    <H1>This is one header</H1>\n    <H2>This is a another Header</H2>\n    <P>Travel from\n    <P>\n    <B>SFO to JFK</B>\n    <BR>\n    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n    <HR>\n    <div style=\"color:#0000FF\">\n    <h3>Traveler <b> name </b> is\n    <p> John Doe </p>\n    </div>"
    html_string_2 = "\n    <!DOCTYPE html>\n    <html>\n    <body>\n\n    <h1>My First Heading</h1>\n    <p>My first paragraph.</p>\n\n    </body>\n    </html>\n    "
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
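

# --- Illustrative sketch (not part of the original test file) ----------------
# What the feature extractor consumes, shown with plain BeautifulSoup; this is
# only an intuition aid, not the MarkupLM implementation.
if __name__ == "__main__":
    from bs4 import BeautifulSoup

    html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
    soup = BeautifulSoup(html, "html.parser")
    assert [tag.get_text() for tag in soup.find_all(["h1", "p"])] == [
        "My First Heading",
        "My first paragraph.",
    ]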
| 363 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'{solution() = }')
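

# --- Sanity-check sketch (not part of the original solution) -----------------
# Euclid's parametrisation behind the loop above: for coprime m > n of opposite
# parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple with
# perimeter 2m(m + n). Verified here for m=2, n=1.
def _euclid_triple(m, n):
    assert m > n > 0 and gcd(m, n) == 1 and (m - n) % 2 == 1
    return (m * m - n * n, 2 * m * n, m * m + n * n)


_a, _b, _c = _euclid_triple(2, 1)  # (3, 4, 5)
assert _a * _a + _b * _b == _c * _c
assert _a + _b + _c == 2 * 2 * (2 + 1)  # perimeter = 2m(m + n) = 12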
| 80 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        '''simple docstring'''
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        '''simple docstring'''
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @slow
    @unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        '''simple docstring'''
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
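

# --- Illustrative sketch (not part of the original test file) ----------------
# The elementwise comparison above relies on math.isclose with matched relative
# and absolute tolerances; a quick standalone illustration of that check.
assert math.isclose(0.57280, 0.57281, rel_tol=TOLERANCE, abs_tol=TOLERANCE)
assert not math.isclose(0.5, 0.6, rel_tol=TOLERANCE, abs_tol=TOLERANCE)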
| 90 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param) -> str:
    '''simple docstring'''
    # show all params in the sub-test name, not just the first one
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    '''simple docstring'''

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True, ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()
        if fp16:
            args.extend(["--fp16"])
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()

| 232 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """simple docstring"""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
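

# --- Usage sketch (not part of the original file) ----------------------------
# Character n-grams of size 3 over a short string.
_ngrams = create_ngram("transformers", 3)
assert _ngrams[:3] == ["tra", "ran", "ans"]
assert len(_ngrams) == len("transformers") - 3 + 1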
| 265 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='''slaney''', mel_scale='''slaney''', ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, '''hann'''), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='''dB''', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
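

# --- Illustrative sketch (not part of the original file) ---------------------
# The dB scaling applied in _np_extract_fbank_features above: shift a dB-scaled
# spectrogram down by 20, compress by 40 with clipping into [-2, 0], then shift
# into [-1, 1]. The random input is only a stand-in for a real log-mel array.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _log_spec = _rng.uniform(-80.0, 0.0, size=(128, 100))
    _scaled = np.clip((_log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0
    assert _scaled.min() >= -1.0 and _scaled.max() <= 1.0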
| 265 | 1 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        '''simple docstring'''
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        '''simple docstring'''
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        '''simple docstring'''
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": CTRLModel,
            """text-classification""": CTRLForSequenceClassification,
            """text-generation""": CTRLLMHeadModel,
            """zero-shot""": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    # test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        '''simple docstring'''
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        '''simple docstring'''
        pass

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''The model doesn\'t support left padding''')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        '''simple docstring'''
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        '''simple docstring'''
        model = CTRLLMHeadModel.from_pretrained('''ctrl''')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 186 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    '''simple docstring'''
    # convert bytes to MiB
    return int(x / 2**20)


class TorchTracemalloc:
    '''simple docstring'''

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        """glue""", """mrpc""", split={"""train""": f"""train[:{n_train}]""", """validation""": f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin)))
        accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used))
        accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked))
        accelerator.print(
            """Total Peak Memory consumed during the train (max): {}""".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, """peak_memory_utilization.json"""), """w""") as f:
            json.dump(train_total_peak_memory, f)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""", type=str, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=False, )
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
    parser.add_argument(
        """--peak_memory_upper_bound""", type=float, default=None, help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""", )
    parser.add_argument(
        """--n_train""", type=int, default=320, help="""Number of training examples to use.""", )
    parser.add_argument(
        """--n_val""", type=int, default=160, help="""Number of validation examples to use.""", )
    parser.add_argument(
        """--num_epochs""", type=int, default=1, help="""Number of train epochs.""", )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
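

# --- Illustrative sketch (not part of the original script) -------------------
# A CPU analogue of TorchTracemalloc above, built on the standard-library
# tracemalloc module so it runs without a GPU; the MiB conversion mirrors bamb.
# Usage: `with PyTracemalloc() as tm: ...` then inspect tm.used / tm.peaked.
class PyTracemalloc:
    def __enter__(self):
        import tracemalloc

        self._tracemalloc = tracemalloc
        tracemalloc.start()
        return self

    def __exit__(self, *exc):
        current, peak = self._tracemalloc.get_traced_memory()
        self._tracemalloc.stop()
        self.used = bamb(current)  # MiB still allocated at exit
        self.peaked = bamb(peak)  # MiB at the high-water mark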
| 229 | 0 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = """T5Config"""
class TFMTaModel(TFTaModel):
    """simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    """simple docstring"""

    model_type = "mt5"
    config_class = MTaConfig
| 150 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
a : Dict = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
a : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
a : int = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
lowercase__ : Optional[int]= spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
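# Illustrative sketch of what scipy.stats.spearmanr computes, by hand: rank
# both series, then take the Pearson correlation of the ranks. No tie
# handling, so this is only a sanity check for tie-free data such as the
# docstring example above.
def _spearman_by_hand(x, y):
    import numpy as np

    rank_x = np.argsort(np.argsort(x))  # argsort twice yields 0-based ranks
    rank_y = np.argsort(np.argsort(y))
    return float(np.corrcoef(rank_x, rank_y)[0, 1])


assert abs(_spearman_by_hand([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4]) - (-0.7)) < 1e-9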
| 150 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000) -> int:
    # Count, for each perimeter up to `limit`, how many right triangles with
    # integral side lengths produce it; the answer is the number of
    # perimeters hit exactly once.
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
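# Sketch of the Euclid parametrisation the solution relies on: coprime m > n
# of opposite parity generate every primitive Pythagorean triple
# (m*m - n*n, 2*m*n, m*m + n*n), whose perimeter is exactly 2*m*(m + n).
def primitive_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)


assert primitive_triple(2, 1) == (3, 4, 5)
assert sum(primitive_triple(3, 2)) == 2 * 3 * (3 + 2)  # 5-12-13 has perimeter 30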
| 195 |
from manim import *
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('CPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('GPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
gpu.move_to([-1, -1, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Model' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
model.move_to([3, -1.0, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
rect.set_stroke(snake_case )
lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case , buff=0.0 )
self.add(snake_case )
model_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case , *snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Loaded Checkpoint' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = fill.copy().set_fill(snake_case , opacity=0.7 )
target.move_to(snake_case )
ckpt_arr.append(snake_case )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case , snake_case )
lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case )
lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Disk' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case , run_time=3 ) , Write(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) )
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case , run_time=1.5 ) )
self.play(*snake_case )
self.play(FadeOut(snake_case ) )
lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case , run_time=3 ) )
self.play(
FadeOut(snake_case , snake_case , *snake_case , *snake_case ) , )
self.wait()
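# Rendering sketch (the module name below is a placeholder): Manim scenes are
# rendered from the command line, e.g.
#
#   manim -pql checkpointing_animation.py A_
#
# where -p previews the result and -ql selects low quality for fast iteration.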
| 195 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
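# Quick illustrative check (values are made up): the divide-and-conquer
# search agrees with the built-in max() and recurses only O(log n) deep.
_demo_nums = [3, -1, 7, 7, 0, 2]
assert find_max(_demo_nums, 0, len(_demo_nums) - 1) == max(_demo_nums) == 7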
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 82 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a ( _lowerCAmelCase ):
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """embed_dim""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, """num_heads""" ) )
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=[16, 48, 96], SCREAMING_SNAKE_CASE_=[1, 3, 6], SCREAMING_SNAKE_CASE_=[1, 2, 10], SCREAMING_SNAKE_CASE_=[7, 3, 3], SCREAMING_SNAKE_CASE_=[4, 2, 2], SCREAMING_SNAKE_CASE_=[2, 1, 1], SCREAMING_SNAKE_CASE_=[2, 2, 2], SCREAMING_SNAKE_CASE_=[False, False, True], SCREAMING_SNAKE_CASE_=[0.0, 0.0, 0.0], SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=2, ) -> List[Any]:
UpperCAmelCase_: Union[str, Any] = parent
UpperCAmelCase_: Any = batch_size
UpperCAmelCase_: Optional[int] = image_size
UpperCAmelCase_: Tuple = patch_sizes
UpperCAmelCase_: int = patch_stride
UpperCAmelCase_: int = patch_padding
UpperCAmelCase_: List[str] = is_training
UpperCAmelCase_: List[Any] = use_labels
UpperCAmelCase_: int = num_labels
UpperCAmelCase_: Dict = num_channels
UpperCAmelCase_: Any = embed_dim
UpperCAmelCase_: Optional[Any] = num_heads
UpperCAmelCase_: Dict = stride_kv
UpperCAmelCase_: Dict = depth
UpperCAmelCase_: Optional[Any] = cls_token
UpperCAmelCase_: List[str] = attention_drop_rate
UpperCAmelCase_: List[str] = initializer_range
UpperCAmelCase_: Tuple = layer_norm_eps
def __snake_case (self ) -> Dict:
UpperCAmelCase_: str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_: Optional[Any] = None
if self.use_labels:
# create a random int32 tensor of given shape
UpperCAmelCase_: str = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase_: List[str] = self.get_config()
return config, pixel_values, labels
def __snake_case (self ) -> Tuple:
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
UpperCAmelCase_: Optional[int] = TFCvtModel(config=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_: Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCAmelCase_: Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCAmelCase_: str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str:
UpperCAmelCase_: List[str] = self.num_labels
UpperCAmelCase_: Tuple = TFCvtForImageClassification(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = config_and_inputs
UpperCAmelCase_: Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
A = False
A = False
A = False
A = False
A = False
def __snake_case (self ) -> int:
UpperCAmelCase_: Tuple = TFCvtModelTester(self )
UpperCAmelCase_: Dict = TFCvtConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self ) -> List[Any]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
def __snake_case (self ) -> Optional[int]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0, reason="""TF does not support backprop for grouped convolutions on CPU.""", )
@slow
def __snake_case (self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(SCREAMING_SNAKE_CASE_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[str] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Any = [*signature.parameters.keys()]
UpperCAmelCase_: Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Any:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Optional[Any] = outputs.hidden_states
UpperCAmelCase_: Optional[int] = len(self.model_tester.depth )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_: Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> int:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> Optional[int]:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: Union[str, Any] = TFCvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Tuple:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Tuple = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase_: Dict = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""tf""" )
# forward pass
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCAmelCase_: Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
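# The spatial sizes asserted in create_and_check_model above come from
# standard convolution arithmetic. A standalone sketch with this tester's
# stage-1 values (kernel 7, stride 4, padding 2 on a 64-pixel input):
def _conv_output_size(dim: int, kernel: int, stride: int, padding: int) -> int:
    return (dim + 2 * padding - kernel) // stride + 1


assert _conv_output_size(64, 7, 4, 2) == 16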
| 82 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__lowerCAmelCase = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"do_convert_rgb": True,
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__a , __a )
def snake_case ( self , **__a ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = processor(text=__a )
__lowerCAmelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__a )
__lowerCAmelCase = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
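# Usage sketch of the pattern exercised above (the checkpoint name is
# illustrative, not taken from this test file):
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#
# The processor simply routes `text` to the tokenizer and `images` to the
# image processor, then merges both outputs into one encoding dict.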
| 57 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def _lowerCamelCase ( ):
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def _lowerCamelCase ( ):
'''simple docstring'''
assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
__lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
__lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
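# Illustrative sketch of how a local binary pattern value is assembled: each
# of the eight neighbours sets one bit when it is >= the centre pixel. The
# exact bit ordering used by the library may differ; this only shows the idea.
def _lbp_from_neighbors(center: int, neighbors: list) -> int:
    return sum((neighbor >= center) << i for i, neighbor in enumerate(neighbors))


assert _lbp_from_neighbors(5, [6, 4, 5, 1, 9, 2, 7, 3]) == 0b1010101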
| 57 | 1 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Return the index of the smallest element of v[l..r] that is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a better strand
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
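# Illustrative check with the classic example: [10, 22, 9, 33, 21, 50, 41, 60]
# has a longest strictly increasing subsequence of length 5 (10 22 33 50 60).
assert longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60]) == 5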
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 34 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = 'levit'

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 34 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place with a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence
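# Quick property sketch: on inputs drawn from {0, 1, 2} the single-pass
# three-way partition must agree with an ordinary sort.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == sorted([2, 0, 1, 2, 0, 1])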
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by commas:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 95 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = ReformerTokenizer
a_ = ReformerTokenizerFast
a_ = True
a_ = False
a_ = True
def A ( self : Optional[Any] ) -> List[Any]:
super().setUp()
UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[Any] = '''<s>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : Any ) -> str:
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_A ) , 10_00 )
def A ( self : Optional[int] ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A ( self : Optional[Any] ) -> List[Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = tokenizer.encode(_A )
UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def A ( self : Tuple , _A : Dict=15 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def A ( self : Union[str, Any] ) -> int:
pass
def A ( self : int ) -> Any:
UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A )
UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def A ( self : List[str] ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
def A ( self : str ) -> str:
UpperCAmelCase_ : Tuple = '''Hello World!'''
UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def A ( self : List[Any] ) -> str:
UpperCAmelCase_ : Tuple = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
UpperCAmelCase_ : int = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def A ( self : List[str] ) -> Optional[int]:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
UpperCAmelCase_ : List[Any] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
UpperCAmelCase_ : Optional[int] = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def A ( self : int ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
UpperCAmelCase_ : Optional[Any] = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
| 304 | 0 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution=None) -> list:
    """Strand sort: repeatedly peel an increasing strand off the input and
    merge it into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
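# Step-through sketch: for [10, 5, 30, 40, 2, 4, 9] the first strand peeled
# off is [10, 30]; later strands are merged into the growing solution until
# the input is exhausted.
assert strand_sort([10, 5, 30, 40, 2, 4, 9]) == [2, 4, 5, 9, 10, 30, 40]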
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 154 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , UpperCamelCase__ ).groups()[0]
class UpperCamelCase__( __A ):
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> List[str]:
A__ = file_names
A__ = image_transform
A__ = label_to_id
def __len__( self ) -> Dict:
return len(self.file_names )
def __getitem__( self ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = self.file_names[idx]
A__ = PIL.Image.open(__UpperCAmelCase )
A__ = raw_image.convert('RGB' )
if self.image_transform is not None:
A__ = self.image_transform(__UpperCAmelCase )
A__ = extract_label(__UpperCAmelCase )
if self.label_to_id is not None:
A__ = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config['num_epochs'] )
A__ = int(config['seed'] )
A__ = int(config['batch_size'] )
A__ = config['image_size']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
A__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
A__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
A__ = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
A__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
A__ = os.path.split(UpperCamelCase__ )[-1].split('.' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
A__ = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
A__ = [extract_label(UpperCamelCase__ ) for fname in file_names]
A__ = list(set(UpperCamelCase__ ) )
id_to_label.sort()
A__ = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
A__ = np.random.permutation(len(UpperCamelCase__ ) )
A__ = int(0.8 * len(UpperCamelCase__ ) )
A__ = random_perm[:cut]
A__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
A__ = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
A__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
A__ = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
A__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
A__ = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = create_model('resnet50d' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
A__ = False
for param in model.get_classifier().parameters():
A__ = True
# We normalize the batches of images to be a bit faster.
A__ = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
A__ = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
A__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
A__ = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
A__ = 0
# We also need to keep track of the starting epoch so files are named properly
A__ = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
A__ = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
A__ = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
A__ = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
A__ = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
A__ = int(training_difference.replace('epoch_' , '' ) ) + 1
A__ = None
else:
A__ = int(training_difference.replace('step_' , '' ) )
A__ = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
A__ = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
A__ = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
A__ = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
A__ = model(UpperCamelCase__ )
A__ = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
A__ = 0
A__ = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
A__ = {k: v.to(accelerator.device ) for k, v in batch.items()}
A__ = (batch['image'] - mean) / std
with torch.no_grad():
A__ = model(UpperCamelCase__ )
A__ = outputs.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch['label']) )
A__ = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
A__ = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(UpperCamelCase__ ),
'epoch': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
A__ = F'''epoch_{epoch}'''
if args.output_dir is not None:
A__ = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=UpperCamelCase__ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=UpperCamelCase__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=UpperCamelCase__ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
A__ = parser.parse_args()
A__ = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
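# Launch sketch (the script and data paths are placeholders): the parser
# above is driven through `accelerate launch`, which picks up the machine
# setup created by `accelerate config`, so the same file runs on CPU, one
# GPU or several GPUs unchanged:
#
#   accelerate launch cv_example.py --data_dir /path/to/pet_images \
#       --checkpointing_steps epoch --output_dir ./checkpoints --with_tracking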
| 154 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
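# Sanity sketch: the density peaks at x == mu with value 1 / sqrt(2*pi*sigma^2),
# roughly 0.3989 for the standard normal.
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12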
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''Solve matrix @ x = vector by Gaussian elimination with partial pivoting.'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    '''Fit the unique polynomial of degree len(y_points) - 1 through the
    points (1, y_points[0]), (2, y_points[1]), ... and return it as a
    callable.'''
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    '''The generating polynomial u(n) from Project Euler problem 101.'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''Sum the first incorrect terms (FITs) of the optimum polynomials
    OP(1, n) .. OP(order, n).'''
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
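# Worked micro-examples (values chosen for illustration): fitting the line
# through (1, 3) and (2, 5) solves [[1, 1], [2, 1]] @ [a, b]^T = [3, 5]^T,
# giving y = 2x + 1; and the quadratic through the first three cubes predicts
# 58 at n = 4, the classic first incorrect term OP(3, 4) from the problem text.
assert solve([[1, 1], [2, 1]], [[3], [5]]) == [[2.0], [1.0]]
assert interpolate([1, 8, 27])(4) == 58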
if __name__ == "__main__":
print(F"""{solution() = }""")
| 197 | 1 |
'''simple docstring'''
import os
from collections.abc import Iterator
def lowercase (_A = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(UpperCAmelCase__ ):
_lowerCAmelCase : Dict = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCAmelCase__ )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ).lstrip('./' )
def lowercase (_A ):
"""simple docstring"""
return f'{i * " "}*' if i else "\n##"
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCAmelCase__ ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(UpperCAmelCase__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def lowercase (_A = "." ):
"""simple docstring"""
_lowerCAmelCase : List[str] = """"""
for filepath in sorted(good_file_paths(UpperCAmelCase__ ) ):
_lowerCAmelCase : Optional[Any] = os.path.split(UpperCAmelCase__ )
if filepath != old_path:
_lowerCAmelCase : Tuple = print_path(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase : List[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
_lowerCAmelCase : int = f'{filepath}/{filename}'.replace(' ' , '%20' )
_lowerCAmelCase : List[Any] = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f'{md_prefix(UpperCAmelCase__ )} [{filename}]({url})' )
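# md_prefix maps a nesting depth to its Markdown bullet: depth 0 opens a new
# "##" section, deeper levels emit two-space-indented "*" bullets.
assert md_prefix(0) == "\n##"
assert md_prefix(2) == "    *"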
if __name__ == "__main__":
print_directory_md(""".""")
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("""transformers""")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    factors = prime_factors(number)
    if len(set(factors)) != len(factors):  # a squared prime factor => Mobius value 0
        return 0
    return -1 if len(factors) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
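
# Added sanity checks (illustrative, not in the original file):
def _mobius_examples() -> None:
    assert mobius(1) == 1   # empty product of primes
    assert mobius(2) == -1  # a single prime factor
    assert mobius(6) == 1   # two distinct prime factors
    assert mobius(4) == 0   # squared prime factor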
| 77 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer(s):
    '''simple docstring'''
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" " , text )
    def white_space_fix(text):
        return " ".join(text.split() )
    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold, a_pred):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold, a_pred):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
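
# Worked example (added): for gold "the cat sat" and prediction "cat sat down",
# normalization drops the article, the token overlap is {"cat", "sat"} (size 2),
# so precision = 2/3, recall = 2/2 = 1 and F1 = 2pr/(p+r) = 0.8.
def _fa_example() -> float:
    return compute_fa("the cat sat" , "cat sat down" )  # 0.8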
def get_raw_scores(dataset, preds):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    '''simple docstring'''
    for k in new_eval:
        main_eval[F"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    '''simple docstring'''
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    '''simple docstring'''
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(main_eval , pr_exact , "pr_exact" )
    merge_eval(main_eval , pr_fa , "pr_f1" )
    merge_eval(main_eval , pr_oracle , "pr_oracle" )
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(F'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , F'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    '''simple docstring'''
    best_exact , exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa , fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json["data"]
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw , fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , "HasAns" )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , "NoAns" )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , "hasAns" )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , "noAns" )
    if OPTS.out_file:
        with open(OPTS.out_file , "w" ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
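
# Usage sketch (added; the script file name is an assumption):
#   python evaluate-v2.0.py dev-v2.0.json predictions.json -n na_prob.json -o eval.json
# prints or writes an OrderedDict like
#   {"exact": ..., "f1": ..., "total": ..., "HasAns_exact": ..., "NoAns_f1": ...}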
| 80 | 0 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
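
# Worked example (added): for [2, 7, 3, 5, 4], every variant returns
# [7, -1, 5, -1, -1]; the stack-based version does it in a single O(n) pass.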
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 130 |
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f'''Invalid search term: {invalid_search_terms}'''
        raise ValueError(msg)
    response = requests.get(
        f'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={'User-agent': 'A random string'} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data['data']['children'][id_]['data'][item] for item in wanted_data
        }
    return data_dict
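
# Shape note (added): without wanted_data the raw listing children are returned
# as-is; with wanted_data each entry is reduced to just the requested fields,
# e.g. {0: {"title": ..., "url": ..., "selftext": ...}}.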
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext'''])) | 130 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self , classifier , _ ):
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(outputs , {"""sequence""": ANY(str ), """labels""": [ANY(str )], """scores""": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str ), """labels""": [ANY(str ), ANY(str )], """scores""": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("""""" , candidate_labels="""politics""" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="""politics""" )
        with self.assertRaises(ValueError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
        with self.assertRaises(TypeError ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(AttributeError ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id(self , zero_shot_classifier ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
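
        # Note (added): entailment_id is -1 when no key of label2id starts with
        # "entail"; -1 then simply indexes the model's last logit downstream.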
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowercase , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        outputs = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowercase , )
self.assertEqual(
            nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , ) | 206 |
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    '''simple docstring'''
    return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def to_simple_case(str_: str) -> str:
    '''simple docstring'''
    string_split = split_input(str_ )
    return "".join(
        ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = """""".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = """""".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    '''simple docstring'''
    return to_simple_case(text )
def to_camel_case(text: str) -> str:
    '''simple docstring'''
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , """_""" )
def to_kebab_case(text: str, upper: bool) -> str:
    '''simple docstring'''
    return to_complex_case(text , upper , """-""" )
if __name__ == "__main__":
__import__('''doctest''').testmod() | 206 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("""in_layers.0""" , """norm1""" )
        new_item = new_item.replace("""in_layers.2""" , """conv1""" )
        new_item = new_item.replace("""out_layers.0""" , """norm2""" )
        new_item = new_item.replace("""out_layers.3""" , """conv2""" )
        new_item = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item = new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item = new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] = query.reshape(target_shape )
            checkpoint[path_map["""key"""]] = key.reshape(target_shape )
            checkpoint[path_map["""value"""]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["""old"""]]
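
# Minimal illustration (added): a mapping produced by renew_resnet_paths, e.g.
#   {"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"},
# is rewritten again by `additional_replacements` before the tensor is copied
# under its diffusers-style key.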
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["""time_embedding.linear_1.weight"""] = checkpoint["""time_embed.0.weight"""]
    new_checkpoint["""time_embedding.linear_1.bias"""] = checkpoint["""time_embed.0.bias"""]
    new_checkpoint["""time_embedding.linear_2.weight"""] = checkpoint["""time_embed.2.weight"""]
    new_checkpoint["""time_embedding.linear_2.bias"""] = checkpoint["""time_embed.2.bias"""]
    new_checkpoint["""conv_in.weight"""] = checkpoint["""input_blocks.0.0.weight"""]
    new_checkpoint["""conv_in.bias"""] = checkpoint["""input_blocks.0.0.bias"""]
    new_checkpoint["""conv_norm_out.weight"""] = checkpoint["""out.0.weight"""]
    new_checkpoint["""conv_norm_out.bias"""] = checkpoint["""out.0.bias"""]
    new_checkpoint["""conv_out.weight"""] = checkpoint["""out.2.weight"""]
    new_checkpoint["""conv_out.bias"""] = checkpoint["""out.2.bias"""]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = (i - 1) % (config["""num_res_blocks"""] + 1)
        resnets = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
        attentions = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
        if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
            new_checkpoint[F'''down_blocks.{block_id}.downsamplers.0.conv.weight'''] = checkpoint[
                F'''input_blocks.{i}.0.op.weight'''
            ]
            new_checkpoint[F'''down_blocks.{block_id}.downsamplers.0.conv.bias'''] = checkpoint[
                F'''input_blocks.{i}.0.op.bias'''
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
        resnet_op = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                """old""": F'''input_blocks.{i}.1''',
                """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
            }
            to_split = {
                F'''input_blocks.{i}.1.qkv.bias''': {
                    """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                    """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                    """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                },
                F'''input_blocks.{i}.1.qkv.weight''': {
                    """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                    """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                    """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        """middle_block.1.qkv.bias""": {
            """key""": """mid_block.attentions.0.key.bias""",
            """query""": """mid_block.attentions.0.query.bias""",
            """value""": """mid_block.attentions.0.value.bias""",
        },
        """middle_block.1.qkv.weight""": {
            """key""": """mid_block.attentions.0.key.weight""",
            """query""": """mid_block.attentions.0.query.weight""",
            """value""": """mid_block.attentions.0.value.weight""",
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config["""num_res_blocks"""] + 1)
        layer_in_block_id = i % (config["""num_res_blocks"""] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split(""".""" )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
            attentions = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
                new_checkpoint[F'''up_blocks.{block_id}.upsamplers.0.conv.weight'''] = checkpoint[
                    F'''output_blocks.{i}.{index}.conv.weight'''
                ]
                new_checkpoint[F'''up_blocks.{block_id}.upsamplers.0.conv.bias'''] = checkpoint[
                    F'''output_blocks.{i}.{index}.conv.bias'''
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    """old""": F'''output_blocks.{i}.1''',
                    """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
                }
                to_split = {
                    F'''output_blocks.{i}.1.qkv.bias''': {
                        """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                        """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                        """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                    },
                    F'''output_blocks.{i}.1.qkv.weight''': {
                        """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                        """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                        """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = """.""".join(["""output_blocks""", str(i ), path["""old"""]] )
                new_path = """.""".join(["""up_blocks""", str(block_id ), """resnets""", str(layer_in_block_id ), path["""new"""]] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
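
# Usage sketch (added; script and file names are assumptions):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_file config.json --dump_path ./converted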
| 341 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a ="""src/diffusers"""
a ="""."""
# This is to make sure the diffusers module imported is the one in the repo.
a =importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
a =spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , line ) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split('.' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , F"{module}.py" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
    with open(os.path.join(DIFFUSERS_PATH , F"{module}.py" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(F" {object_name} does not match any function or class in {module}." )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return ''.join(code_lines )
_re_copy_warning = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_re_replace_pattern = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_re_fill_pattern = re.compile(r"""<FILL\s+[^>]*>""")
def get_indent(code):
    lines = code.split('\n' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F"class Bla:\n{code}"
    # target version and preview flag assumed from the upstream script
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(F"^{indent}# End copy" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '\n'.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('with' , '' ).split(',' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(F"Detected changes, rewriting {filename}." )
        with open(filename , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(lines )
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , '**/*.py' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '\n'.join(diffs )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
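
# The "Copied from" convention this script enforces (added, illustrative):
#   # Copied from diffusers.models.some_module.SomeClass with SomeClass->MyClass
# keeps the decorated block byte-identical to its source, applying the
# `Old->New` rename (optionally "all-casing") before diffing.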
| 73 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
    '''tokenizer_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/rembert''': 256,
}
SPIECE_UNDERLINE = '''▁'''
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
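
# Usage sketch (added, illustrative):
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tokenizer.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]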
| 162 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset: datasets.Dataset, length):
    '''simple docstring'''
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    '''simple docstring'''
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    '''simple docstring'''
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    '''simple docstring'''
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"""list""": (100,)} , )
        print("""first set of iterations""" )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + """ """ + """ """.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print("""shuffling dataset""" )
        dataset = dataset.shuffle()
        print("""Second set of iterations (after shuffling""" )
        for func, kwargs in functions_shuffled:
            print("""shuffled """ , func.__name__ , str(kwargs ) )
            times["""shuffled """ + func.__name__ + """ """ + """ """.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
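
# Usage note (a sketch of expected behavior, not output from an actual run): executing
# this script writes a JSON report under results/ next to it, mapping each read
# function and its arguments to the measured duration in seconds.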
| 248 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,  # 4 latent channels + 3 channels of the concatenated low-res image
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
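
    # The dummy modules above are deliberately tiny so the fast tests run on CPU in
    # seconds; their shapes only need to be mutually consistent, not realistic.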
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.dummy_cond_unet_upscale
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = DDIMScheduler(prediction_type="""v_prediction""" )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase = unet.half()
UpperCAmelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase = StableDiffusionUpscalePipeline(
unet=snake_case__ , low_res_scheduler=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , max_noise_level=3_50 , )
UpperCAmelCase = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase = """A painting of a squirrel eating a burger"""
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="""np""" , ).images
UpperCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
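
# For reference, a minimal end-user call of the upscaling pipeline (a hypothetical
# sketch mirroring what the integration tests below exercise, assuming a CUDA device
# and a PIL `low_res_image`):
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained(
#         "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]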
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 inference uses a looser tolerance than the fp32 test above
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
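
# Note: the @slow integration tests above are skipped by default; in the usual
# diffusers test setup they are enabled via an environment flag, e.g. (assuming the
# standard test runner) RUN_SLOW=1 python -m pytest <this test file>.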
| 248 | 1 |