"""
Implementation of a Gaussian filter using im2col-style convolution.
"""
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
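
# A quick sanity check for the kernel generator above -- a minimal sketch; note
# that the kernel is intentionally unnormalized, matching the formula used in
# `gen_gaussian_kernel`.
import numpy as np

_kernel = gen_gaussian_kernel(5, sigma=1.0)
assert _kernel.shape == (5, 5)
assert int(np.argmax(_kernel)) == 12  # flat index of the center of a 5x5 grid
assert np.allclose(_kernel, _kernel.T)  # the Gaussian is symmetric in x and y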
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Combines two `Transformer2DModel`s and mixes their outputs during inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
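
# A small numeric sketch (plain tensors, hypothetical shapes) of the mixing rule
# in `forward` above: each transformer contributes a residual
# (encoded_state - input_states), and the residuals are blended by `mix_ratio`.
import torch as _torch

_input_states = _torch.randn(1, 4, 8)
_encoded_0, _encoded_1 = _torch.randn(1, 4, 8), _torch.randn(1, 4, 8)
_mix_ratio = 0.5

_mixed = (_encoded_0 - _input_states) * _mix_ratio + (_encoded_1 - _input_states) * (1 - _mix_ratio)
_output = _mixed + _input_states
# with mix_ratio == 0.5 this reduces to the plain average of the two encoded states:
assert _torch.allclose(_output, (_encoded_0 + _encoded_1) / 2, atol=1e-6)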
"""simple docstring"""
from __future__ import annotations
def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , ) ->tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
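
# Usage sketch for `carrier_concentration` above: exactly one argument is 0 and
# the function solves for it through the mass-action law n * p = n_i ** 2.
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)
assert carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200) == ("electron_conc", 25.0)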
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
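
# A short sketch of what the lazy structure above buys (assumes `transformers`
# is installed): the package module is a `_LazyModule`, so heavy submodules such
# as `modeling_speecht5` are imported only on first attribute access.
import importlib

_speecht5 = importlib.import_module("transformers.models.speecht5")
_config_cls = _speecht5.SpeechT5Config  # first access triggers the real import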
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD


########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
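
# A minimal sketch of the LocalSGD pattern used in `training_function` above
# (hypothetical toy model and optimizer): parameters are synchronized across
# workers only every `local_sgd_steps` calls, instead of after every step.
def _local_sgd_sketch():
    import torch
    from accelerate import Accelerator
    from accelerate.local_sgd import LocalSGD

    accelerator = Accelerator()
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    model, optimizer = accelerator.prepare(model, optimizer)

    with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
        for _ in range(16):
            optimizer.zero_grad()
            loss = model(torch.randn(3, 4)).sum()
            accelerator.backward(loss)
            optimizer.step()
            local_sgd.step()  # every 8th call triggers the parameter sync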
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
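
# A small sketch of the `global_attention_mask` behavior implemented in `_pad`
# above (assumes the `allenai/led-base-16384` checkpoint can be downloaded):
# positions added by padding receive -1 ("local attention"), not 0.
def _global_attention_pad_sketch():
    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tok("hello world")
    orig_len = len(enc["input_ids"])
    enc["global_attention_mask"] = [1] + [0] * (orig_len - 1)  # global attention on <s>
    padded = tok.pad(enc, padding="max_length", max_length=10)
    assert padded["global_attention_mask"][orig_len:] == [-1] * (10 - orig_len)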
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Any:
'''simple docstring'''
def decorator(_lowercase : List[str] ):
a : Optional[Any] = getattr(_lowercase , "handle_key" , [] )
handle += [key]
setattr(_lowercase , "handle_key" , _lowercase )
return func
return decorator
def _SCREAMING_SNAKE_CASE ( *_lowercase : List[str] ) ->Any:
'''simple docstring'''
def decorator(_lowercase : str ):
a : Optional[Any] = getattr(_lowercase , "handle_key" , [] )
handle += keys
setattr(_lowercase , "handle_key" , _lowercase )
return func
return decorator
class __UpperCamelCase ( a__ ):
def __new__( cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : int = super().__new__(cls , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not hasattr(lowerCAmelCase__ , "key_handler" ):
setattr(lowerCAmelCase__ , "key_handler" , {} )
setattr(lowerCAmelCase__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
a : Tuple = getattr(lowerCAmelCase__ , "handle_key" , [] )
for key in handled_keys:
a : Union[str, Any] = value
return new_cls
@staticmethod
def __a ( cls ) -> int:
a : List[str] = get_character()
if char != KEYMAP["undefined"]:
a : Tuple = ord(lowerCAmelCase__ )
a : List[Any] = cls.key_handler.get(lowerCAmelCase__ )
if handler:
a : Dict = char
return handler(cls )
else:
return None
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
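
# A minimal usage sketch (hypothetical menu class; the key names are assumed to
# exist in KEYMAP): `register` rebuilds the class through the `KeyHandler`
# metaclass, so methods marked with `mark`/`mark_multiple` are dispatched by
# `handle_input` when their key is pressed.
@register
class _DemoMenu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        print("up")

    @mark_multiple(KEYMAP["left"], KEYMAP["right"])
    def move_sideways(cls):
        print("sideways")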
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] ,lowercase_ : int = 1_2_8 ,lowercase_ : int = 2_5_6 ,lowercase_ : float = 2000.0 ,lowercase_ : int = 7_6_8 ,lowercase_ : int = 1_2 ,lowercase_ : int = 1_2 ,lowercase_ : int = 6_4 ,lowercase_ : int = 2_0_4_8 ,lowercase_ : float = 0.1 ,):
super().__init__()
lowerCAmelCase__ : str = nn.Sequential(
nn.Linear(lowercase_ ,d_model * 4 ,bias=lowercase_ ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=lowercase_ ) ,nn.SiLU() ,)
lowerCAmelCase__ : List[str] = nn.Embedding(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : int = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ )
lowerCAmelCase__ : List[Any] = nn.Dropout(p=lowercase_ )
lowerCAmelCase__ : int = nn.ModuleList()
for lyr_num in range(lowercase_ ):
# FiLM conditional T5 decoder
lowerCAmelCase__ : List[Any] = DecoderLayer(d_model=lowercase_ ,d_kv=lowercase_ ,num_heads=lowercase_ ,d_ff=lowercase_ ,dropout_rate=lowercase_ )
self.decoders.append(lowercase_ )
lowerCAmelCase__ : Dict = TaLayerNorm(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = nn.Dropout(p=lowercase_ )
lowerCAmelCase__ : Optional[int] = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ )
def __lowerCAmelCase ( self : Any ,lowercase_ : Union[str, Any] ,lowercase_ : int ):
lowerCAmelCase__ : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Union[str, Any] ,lowercase_ : Any ,lowercase_ : Any ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCAmelCase__ : int = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
lowerCAmelCase__ : str = self.conditioning_emb(lowercase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCAmelCase__ : Dict = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCAmelCase__ : Tuple = torch.broadcast_to(
torch.arange(lowercase_ ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
lowerCAmelCase__ : Tuple = self.position_encoding(lowercase_ )
lowerCAmelCase__ : Dict = self.continuous_inputs_projection(lowercase_ )
inputs += position_encodings
lowerCAmelCase__ : List[Any] = self.dropout(lowercase_ )
# decoder: No padding present.
lowerCAmelCase__ : Optional[Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCAmelCase__ : Tuple = [(x, self.encoder_decoder_mask(lowercase_ ,lowercase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCAmelCase__ : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
lowerCAmelCase__ : int = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
lowerCAmelCase__ : Any = lyr(
lowercase_ ,conditioning_emb=lowercase_ ,encoder_hidden_states=lowercase_ ,encoder_attention_mask=lowercase_ ,)[0]
lowerCAmelCase__ : Optional[Any] = self.decoder_norm(lowercase_ )
lowerCAmelCase__ : Optional[int] = self.post_dropout(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.spec_out(lowercase_ )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Tuple ,lowercase_ : Tuple ,lowercase_ : List[str] ,lowercase_ : Any ,lowercase_ : str ,lowercase_ : Dict=1E-6 ):
super().__init__()
lowerCAmelCase__ : List[str] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowercase_ ,d_kv=lowercase_ ,num_heads=lowercase_ ,dropout_rate=lowercase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowercase_ ,d_kv=lowercase_ ,num_heads=lowercase_ ,dropout_rate=lowercase_ ,layer_norm_epsilon=lowercase_ ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowercase_ ,d_ff=lowercase_ ,dropout_rate=lowercase_ ,layer_norm_epsilon=lowercase_ ) )
def __lowerCAmelCase ( self : Union[str, Any] ,lowercase_ : Any ,lowercase_ : Dict=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : List[Any]=None ,lowercase_ : Tuple=None ,lowercase_ : List[str]=None ,):
lowerCAmelCase__ : List[Any] = self.layer[0](
lowercase_ ,conditioning_emb=lowercase_ ,attention_mask=lowercase_ ,)
if encoder_hidden_states is not None:
lowerCAmelCase__ : Optional[Any] = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
lowerCAmelCase__ : Tuple = self.layer[1](
lowercase_ ,key_value_states=lowercase_ ,attention_mask=lowercase_ ,)
# Apply Film Conditional Feed Forward layer
lowerCAmelCase__ : int = self.layer[-1](lowercase_ ,lowercase_ )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,lowercase_ : Tuple ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : Tuple ):
super().__init__()
lowerCAmelCase__ : str = TaLayerNorm(lowercase_ )
lowerCAmelCase__ : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=lowercase_ )
lowerCAmelCase__ : Any = Attention(query_dim=lowercase_ ,heads=lowercase_ ,dim_head=lowercase_ ,out_bias=lowercase_ ,scale_qk=lowercase_ )
lowerCAmelCase__ : Tuple = nn.Dropout(lowercase_ )
def __lowerCAmelCase ( self : int ,lowercase_ : List[Any] ,lowercase_ : Any=None ,lowercase_ : List[str]=None ,):
# pre_self_attention_layer_norm
lowerCAmelCase__ : str = self.layer_norm(lowercase_ )
if conditioning_emb is not None:
lowerCAmelCase__ : Any = self.FiLMLayer(lowercase_ ,lowercase_ )
# Self-attention block
lowerCAmelCase__ : int = self.attention(lowercase_ )
lowerCAmelCase__ : str = hidden_states + self.dropout(lowercase_ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,lowercase_ : str ,lowercase_ : List[Any] ,lowercase_ : Union[str, Any] ,lowercase_ : Tuple ,lowercase_ : List[Any] ):
super().__init__()
lowerCAmelCase__ : Tuple = Attention(query_dim=lowercase_ ,heads=lowercase_ ,dim_head=lowercase_ ,out_bias=lowercase_ ,scale_qk=lowercase_ )
lowerCAmelCase__ : str = TaLayerNorm(lowercase_ ,eps=lowercase_ )
lowerCAmelCase__ : Dict = nn.Dropout(lowercase_ )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Tuple ,lowercase_ : str=None ,lowercase_ : Any=None ,):
lowerCAmelCase__ : int = self.layer_norm(lowercase_ )
lowerCAmelCase__ : int = self.attention(
lowercase_ ,encoder_hidden_states=lowercase_ ,attention_mask=attention_mask.squeeze(1 ) ,)
lowerCAmelCase__ : Optional[Any] = hidden_states + self.dropout(lowercase_ )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowercase_ : Any ,lowercase_ : int ,lowercase_ : int ,lowercase_ : str ):
super().__init__()
lowerCAmelCase__ : Tuple = TaDenseGatedActDense(d_model=lowercase_ ,d_ff=lowercase_ ,dropout_rate=lowercase_ )
lowerCAmelCase__ : Optional[int] = TaFiLMLayer(in_features=d_model * 4 ,out_features=lowercase_ )
lowerCAmelCase__ : str = TaLayerNorm(lowercase_ ,eps=lowercase_ )
lowerCAmelCase__ : str = nn.Dropout(lowercase_ )
def __lowerCAmelCase ( self : str ,lowercase_ : Optional[Any] ,lowercase_ : Optional[int]=None ):
lowerCAmelCase__ : Any = self.layer_norm(lowercase_ )
if conditioning_emb is not None:
lowerCAmelCase__ : str = self.film(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.DenseReluDense(lowercase_ )
lowerCAmelCase__ : Any = hidden_states + self.dropout(lowercase_ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase_ : Any ,lowercase_ : str ,lowercase_ : List[Any] ):
super().__init__()
lowerCAmelCase__ : List[Any] = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ )
lowerCAmelCase__ : str = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ )
lowerCAmelCase__ : Optional[int] = nn.Linear(lowercase_ ,lowercase_ ,bias=lowercase_ )
lowerCAmelCase__ : Optional[int] = nn.Dropout(lowercase_ )
lowerCAmelCase__ : Dict = NewGELUActivation()
def __lowerCAmelCase ( self : Dict ,lowercase_ : Any ):
lowerCAmelCase__ : Union[str, Any] = self.act(self.wi_a(lowercase_ ) )
lowerCAmelCase__ : Any = self.wi_a(lowercase_ )
lowerCAmelCase__ : List[str] = hidden_gelu * hidden_linear
lowerCAmelCase__ : List[str] = self.dropout(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = self.wo(lowercase_ )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase_ : Optional[int] ,lowercase_ : Union[str, Any]=1E-6 ):
super().__init__()
lowerCAmelCase__ : str = nn.Parameter(torch.ones(lowercase_ ) )
lowerCAmelCase__ : int = eps
def __lowerCAmelCase ( self : List[str] ,lowercase_ : str ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowerCAmelCase__ : Union[str, Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=lowercase_ )
lowerCAmelCase__ : Tuple = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCAmelCase__ : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __lowerCAmelCase ( self : Tuple ,lowercase_ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(lowercase_ ,3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Union[str, Any] ):
super().__init__()
lowerCAmelCase__ : Dict = nn.Linear(lowercase_ ,out_features * 2 ,bias=lowercase_ )
def __lowerCAmelCase ( self : Dict ,lowercase_ : Dict ,lowercase_ : List[Any] ):
lowerCAmelCase__ : Any = self.scale_bias(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = torch.chunk(lowercase_ ,2 ,-1 )
lowerCAmelCase__ : int = x * (1 + scale) + shift
return x
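
# A quick shape check for the FiLM conditioning above (hypothetical sizes): the
# scale/shift produced from the conditioning embedding broadcast over the
# sequence dimension, so the output keeps the input's shape.
def _film_sketch():
    film = T5FiLMLayer(in_features=768 * 4, out_features=768)
    x = torch.randn(2, 100, 768)  # decoder activations
    cond = torch.randn(2, 1, 768 * 4)  # pooled diffusion-time embedding
    assert film(x, cond).shape == x.shape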
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """
    Holds a DeepSpeed configuration dictionary and can be quickly queried for
    things like the ZeRO stage or whether parameter/optimizer offload is enabled.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    """
    Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine.
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """
    Internal wrapper around a deepspeed optimizer.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """
    Internal wrapper around a deepspeed scheduler.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """
    Dummy optimizer that presents model parameters or param groups; used to keep a
    conventional training loop when the optimizer is specified in the deepspeed config file.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """
    Dummy scheduler that mimics an LR scheduler; used to keep a conventional training
    loop when the scheduler is specified in the deepspeed config file.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
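
# A quick sketch of the dotted-path lookup implemented by `get_value` above:
def _deepspeed_config_sketch():
    cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
    assert cfg.get_value("zero_optimization.stage") == 3
    assert cfg.is_zero3() and cfg.is_offload()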
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """
    A Bezier curve is a weighted sum of a set of control points.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
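
# Sanity sketch: the Bernstein basis computed by `basis_function` must sum to 1
# at any t (this is also asserted inside the method).
def _bezier_basis_sketch():
    curve = BezierCurve([(1.0, 1.0), (1.0, 2.0)])
    assert abs(sum(curve.basis_function(0.5)) - 1.0) < 1e-9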
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
lowerCAmelCase : str = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import multiprocessing
import os
import re
from collections import defaultdict

import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList


EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially.
    See `complete_code` for more details.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task in the dataset, using multiple GPUs via accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(58)
    4
    >>> get_set_bits_count_using_brian_kernighans_algorithm(-1)
    Traceback (most recent call last):
        ...
    ValueError: the value of input must not be negative
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(58)
    4
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
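# Side note (my annotation, not in the original module): on Python 3.10+ the
# same count is available as a built-in int method, useful as a sanity check:
#
#   assert (37).bit_count() == get_set_bits_count_using_brian_kernighans_algorithm(37)  # both are 3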
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Helper function to spherically interpolate between two arrays v0 and v1."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
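# Minimal sketch (added for illustration, not part of the upstream pipeline):
# slerp() above interpolates on the unit sphere. For orthogonal unit vectors,
# the midpoint (t=0.5) should be a unit vector at 45 degrees to both inputs.
def _slerp_demo():
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    # both components equal cos(45°) = sin(π/4) ≈ 0.7071
    assert np.allclose(mid, [np.sin(np.pi / 4), np.sin(np.pi / 4)])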
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ):
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
a : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , __snake_case )
else feature_extractor.size['shortest_edge']
)
a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __snake_case )
set_requires_grad(self.clip_model , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def lowercase_ ( self : Union[str, Any] ):
self.enable_attention_slicing(__snake_case )
def lowercase_ ( self : Optional[Any] ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : Tuple ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : int ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ):
# get the original timestep using init_timestep
a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case )
a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
a : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
if not isinstance(__snake_case , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" )
a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ):
a : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
]
a : Optional[Any] = torch.cat(__snake_case , dim=0 )
else:
a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : List[str] = 0.18215 * init_latents
a : str = init_latents.repeat_interleave(__snake_case , dim=0 )
a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
a : int = init_latents
return latents
def lowercase_ ( self : List[str] , __snake_case : Dict ):
a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ):
a : List[Any] = self.feature_extractor.preprocess(__snake_case )
a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
a : int = self.clip_model.get_image_features(__snake_case )
a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ):
a : Optional[Any] = latents.detach().requires_grad_()
a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
a : int = self.scheduler.alphas_cumprod[timestep]
a : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a : Tuple = torch.sqrt(__snake_case )
a : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __snake_case ):
a : List[Any] = self.scheduler.sigmas[index]
a : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : Union[str, Any] = 1 / 0.18215 * sample
a : str = self.vae.decode(__snake_case ).sample
a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case )
a : List[str] = self.normalize(__snake_case ).to(latents.dtype )
a : List[str] = self.clip_model.get_image_features(__snake_case )
a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale
a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0]
if isinstance(self.scheduler , __snake_case ):
a : List[Any] = latents.detach() + grads * (sigma**2)
a : Optional[int] = noise_pred_original
else:
a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
a : Dict = [generator] + [None] * (batch_size - 1)
a : Any = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
a : List[str] = [x[0] for x in coca_is_none if x[1]]
a : List[str] = ', '.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : int = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : Union[str, Any] = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
a : Optional[Any] = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a : Dict = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a : Any = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a : Any = {}
if accepts_offset:
a : Optional[Any] = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
a : Optional[int] = timesteps[:1].repeat(__snake_case )
# Preprocess image
a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
a : List[Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : str = preprocess(__snake_case , __snake_case , __snake_case )
a : Union[str, Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : List[str] = slerp(
__snake_case , __snake_case , __snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a : Any = content_text_input.input_ids.shape[-1]
a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a : Union[str, Any] = {}
if accepts_eta:
a : List[str] = eta
# check if the scheduler accepts generator
a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a : Any = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a : List[str] = noise_pred.chunk(2 )
a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a : Union[str, Any] = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : Tuple = 1 / 0.18215 * latents
a : Optional[int] = self.vae.decode(__snake_case ).sample
a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a : str = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
lowerCamelCase : Optional[Any] = ShapEPipeline
lowerCamelCase : int = ['''prompt''']
lowerCamelCase : Union[str, Any] = ['''prompt''']
lowerCamelCase : Union[str, Any] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase : Dict = False
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
return 3_2
@property
def __UpperCAmelCase ( self : List[str] ) -> str:
return 3_2
@property
def __UpperCAmelCase ( self : List[Any] ) -> str:
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : str ) -> Tuple:
return 8
@property
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def __UpperCAmelCase ( self : int ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase = {
'num_attention_heads': 2,
'attention_head_dim': 1_6,
'embedding_dim': self.time_input_dim,
'num_embeddings': 3_2,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowerCAmelCase = PriorTransformer(**__snake_case )
return model
@property
def __UpperCAmelCase ( self : int ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase = {
'param_shapes': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 1_2,
'background': (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase = ShapERenderer(**__snake_case )
return model
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = self.dummy_prior
lowerCAmelCase = self.dummy_text_encoder
lowerCAmelCase = self.dummy_tokenizer
lowerCAmelCase = self.dummy_renderer
lowerCAmelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_0_2_4 , prediction_type='sample' , use_karras_sigmas=__snake_case , clip_sample=__snake_case , clip_sample_range=1.0 , )
lowerCAmelCase = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int]=0 ) -> Union[str, Any]:
if str(__snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(__snake_case )
else:
lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
lowerCAmelCase = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 3_2,
'output_type': 'np',
}
return inputs
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__snake_case )
lowerCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCAmelCase = pipe(**self.get_dummy_inputs(__snake_case ) )
lowerCAmelCase = output.images[0]
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
lowerCAmelCase = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
lowerCAmelCase = torch_device == 'cpu'
lowerCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__snake_case , relax_max_difference=__snake_case , )
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**__snake_case )
lowerCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCAmelCase = 1
lowerCAmelCase = 2
lowerCAmelCase = self.get_dummy_inputs(__snake_case )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase = batch_size * [inputs[key]]
lowerCAmelCase = pipe(**__snake_case , num_images_per_prompt=__snake_case )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
lowerCAmelCase = ShapEPipeline.from_pretrained('openai/shap-e' )
lowerCAmelCase = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
lowerCAmelCase = torch.Generator(device=__snake_case ).manual_seed(0 )
lowerCAmelCase = pipe(
'a shark' , generator=__snake_case , guidance_scale=15.0 , num_inference_steps=6_4 , frame_size=6_4 , output_type='np' , ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
    )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
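# Note (my annotation, not part of the upstream file): _LazyModule defers the
# heavy torch/sentencepiece submodule imports until one of the exported names
# is first accessed, e.g.:
#
#   import transformers
#   transformers.SpeechT5Config   # triggers the actual submodule import here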
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the interval [a, b] with lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[idx] = val
                self.flag[idx] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum on the interval [a, b] with lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the number of reversible numbers of the given length.

    >>> reversible_numbers(2, 0, [0] * 2, 2)
    20
    >>> reversible_numbers(3, 0, [0] * 3, 3)
    100
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
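# Hedged cross-check (my annotation, not part of the original solution):
# Project Euler problem 145 states there are 120 reversible numbers below one
# thousand, which matches solution(3). A naive validation for small limits:
#
#   def brute_force_count(limit: int) -> int:
#       count = 0
#       for n in range(1, limit):
#           # n must not end in 0, otherwise reverse(n) has a leading zero
#           if n % 10 != 0 and all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1]))):
#               count += 1
#       return count
#
#   brute_force_count(1000) == 120 == solution(3)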
'''simple docstring'''
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operators.

    >>> add(5, 3)
    8
    """
    while second != 0:
        carry = first & second  # bits that produce a carry
        first ^= second  # sum without the carry
        second = carry << 1  # propagate the carry one position left
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
"""simple docstring"""
import re
import subprocess
import sys
_UpperCamelCase: Optional[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
_UpperCamelCase: Tuple = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
_UpperCamelCase: Any = '|'.join(sys.argv[1:])
_UpperCamelCase: Any = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
_UpperCamelCase: Dict = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( _A , _A ):
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Dict = features.copy() if features else default_expected_features
a : Union[str, Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Tuple = tmp_path / 'cache'
a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a : Optional[int] = features.copy() if features else default_expected_features
a : Dict = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase__ ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a : int = features.copy()
a : List[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Dict = tmp_path / 'cache'
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase__ ( _A , _A , _A ):
if issubclass(_A , _A ):
a : Optional[int] = jsonl_path
elif issubclass(_A , _A ):
a : Optional[int] = [jsonl_path]
a : List[str] = tmp_path / 'cache'
a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def lowerCamelCase__ ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = features.copy() if features else default_expected_features
a : Any = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
if split:
a : Any = {split: jsonl_path}
else:
a : List[Any] = 'train'
a : List[str] = {'train': jsonl_path, 'test': jsonl_path}
a : List[Any] = tmp_path / 'cache'
a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( _A ):
return json.load(_A )
def lowerCamelCase__ ( _A ):
return [json.loads(_A ) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
a : List[str] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : List[Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowercase_ ( self : List[str] , __snake_case : str ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
            assert exported_content == original_content
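# Minimal usage sketch (my annotation, inferred from the tests above; the file
# name and toy data are made up):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()                 # one JSON object per line
#   JsonDatasetWriter(ds, "out.json", lines=False, orient="records").write()  # a single JSON array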
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep coefficients, up to fourth order
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
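# Minimal usage sketch (added for illustration; the toy zero "model output" and
# tensor shapes are made up): driving the scheduler for a few denoising steps.
def _ipndm_demo():
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # stand-in for a denoising network
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample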
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( _A = "laptop" ):
a : Any = f"""https://www.amazon.in/laptop/s?k={product}"""
a : Tuple = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
a : Any = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
a : Optional[int] = item.ha.text
a : str = 'https://www.amazon.in/' + item.ha.a['href']
a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
a : Union[str, Any] = 'Not available'
try:
a : str = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
a : int = ''
try:
a : Union[str, Any] = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
a : Any = float('nan' )
except AttributeError:
pass
a : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
a : Any = ' '
a : List[str] = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, used e.g. for padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer mapping inputs to distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        """Converts raw projections to the right shape and domain of each parameter."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 33 |
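# Aside: `squareplus(x) = (x + sqrt(x**2 + 4)) / 2` above is a smooth, softplus-like
# map onto the positive reals, used so projected distribution parameters such as
# `scale` or `df` stay valid. A minimal numeric check of its behaviour:
import torch

x = torch.tensor([-4.0, 0.0, 4.0])
y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
assert torch.allclose(y, torch.tensor([0.2361, 1.0, 4.2361]), atol=1e-4)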
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
    def get_dummy_components( self : int ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
    def get_dummy_inputs( self : Tuple , device , seed=0 , pil_image=True ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self : Optional[Any] ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({'image_embeds': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def lowercase_ ( self : int ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : Optional[int] = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : str = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 297 | 0 |
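# Aside: a minimal, self-contained sketch of the peak-VRAM measurement pattern the
# test above relies on. The helper name is illustrative (not part of diffusers); the
# torch.cuda calls are the same ones the test uses.
import torch

def peak_memory_bytes(fn) -> int:
    """Run `fn` once and return the peak CUDA memory allocated during the call."""
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()

# usage (assuming `pipe` and `init_image` as in the test):
#     assert peak_memory_bytes(lambda: pipe(init_image, 'anime turtle', num_inference_steps=2)) < 7 * 10**9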
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    """simple docstring"""
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca(fairseq_model, hf_model) -> None:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    """simple docstring"""
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name )
def load_adapter(full_name, value, adapter, unused_weights) -> None:
    """simple docstring"""
    name = full_name.split('adaptor.' )[-1]
    items = name.split('.' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
                adapter.proj_layer_norm.bias.data = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
                adapter.proj.bias.data = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
                adapter.proj.weight.data = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_A , _A ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
            adapter.layers[layer_id].conv.bias.data = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F"""Adapter layer {layer_id} weight was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb) -> nn.Linear:
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ) -> None:
"""simple docstring"""
UpperCamelCase = WavaVecaConfig.from_pretrained(
_A , add_adapter=_A , adapter_stride=_A , adapter_kernel_size=_A , use_auth_token=_A , output_hidden_size=_A , )
UpperCamelCase = MBartConfig.from_pretrained(_A )
# load model
UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
UpperCamelCase = model[0].eval()
# load feature extractor
UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(_A , use_auth_token=_A )
# set weights for wav2vec2 encoder
UpperCamelCase = WavaVecaModel(_A )
recursively_load_weights_wavaveca(model.encoder , _A )
# load decoder weights
UpperCamelCase = MBartForCausalLM(_A )
UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_A )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCamelCase = SpeechEncoderDecoderModel(encoder=_A , decoder=_A )
UpperCamelCase = False
UpperCamelCase = MBartaaTokenizer(_A )
tokenizer.save_pretrained(_A )
UpperCamelCase = hf_wavavec.config.to_dict()
UpperCamelCase = tokenizer.pad_token_id
UpperCamelCase = tokenizer.bos_token_id
UpperCamelCase = tokenizer.eos_token_id
UpperCamelCase = 'mbart50'
UpperCamelCase = 'wav2vec2'
UpperCamelCase = tokenizer.eos_token_id
UpperCamelCase = 250_004
UpperCamelCase = tokenizer.eos_token_id
UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(_A )
hf_wavavec.save_pretrained(_A )
feature_extractor.save_pretrained(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 28 |
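# Aside: the weight migration in the script above hinges on renaming fairseq
# state_dict keys to HF names via the MAPPING table, whose values may contain a '*'
# layer wildcard. A stripped-down sketch of that renaming step (the helper name is
# illustrative, not part of the script):
def rename_key(name, mapping):
    # return the HF key for a fairseq key, expanding '*' to the layer index
    for key, mapped_key in mapping.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            return mapped_key
    return None

# e.g. rename_key('encoder.layers.3.self_attn.k_proj.weight', MAPPING)
# -> 'encoder.layers.3.attention.k_proj'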
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase: List[str] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class TaConfig( PretrainedConfig ):
lowercase__ = """t5"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=3_21_28 , d_model=5_12 , d_kv=64 , d_ff=20_48 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=1_28 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class TaOnnxConfig( OnnxSeqaSeqConfigWithPast ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
@property
    def default_onnx_opset( self ) -> int:
        return 13 | 297 | 0 |
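# Aside: a worked example of the `feed_forward_proj` parsing done in TaConfig above.
# 'gated-gelu' splits into ['gated', 'gelu'], so the dense activation becomes 'gelu'
# (remapped to 'gelu_new' for backwards compatibility) with gating enabled:
act_info = 'gated-gelu'.split('-')
assert act_info[-1] == 'gelu'            # dense_act_fn before the compat remap
assert act_info[0] == 'gated'            # is_gated_act = True
# a plain 'relu' splits into ['relu']: dense_act_fn='relu', is_gated_act=False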
class EditDistance:
    """
    Computes the Levenshtein (edit) distance between two strings, both
    top-down (memoized recursion) and bottom-up (tabulation).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f'The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}')
    print(f'The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}')
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 184 |
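# Aside: a quick sanity check for the EditDistance class above, using the classic
# "intention" -> "execution" pair whose Levenshtein distance is 5:
solver = EditDistance()
assert solver.min_dist_bottom_up('intention', 'execution') == 5
assert solver.min_dist_top_down('intention', 'execution') == 5
assert solver.min_dist_bottom_up('', 'abc') == 3  # pure insertions into an empty string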
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 | 0 |
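# Aside: a worked example for the formula above, f = 1 / (2*pi*sqrt(L*C)).
# With L = 10 mH and C = 100 uF: L*C = 1e-2 * 1e-4 = 1e-6, sqrt(L*C) = 1e-3,
# so f = 1 / (2*pi*1e-3) ~= 159.155 Hz:
label, freq = resonant_frequency(inductance=1e-2, capacitance=1e-4)
assert label == 'Resonant frequency'
assert abs(freq - 159.1549) < 1e-3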
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 225 |
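# Aside: the `resize` step above keeps the aspect ratio by scaling the shorter side
# to `shortest_edge`. A small sketch of that computation (transformers'
# get_resize_output_image_size does the equivalent internally; this helper is
# illustrative only):
def shortest_edge_size(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return int(round(height * scale)), int(round(width * scale))

# e.g. a 480x640 image with shortest_edge=256 becomes roughly 256x341:
assert shortest_edge_size(480, 640, 256) == (256, 341)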
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def load_vocab(vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
return vocab
class WordpieceTokenizer(object ):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer( PreTrainedTokenizer ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = False
    def __init__( self , vocab_file : str , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens : List[str] ):
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
if " " in self.encoder:
a : Union[str, Any] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
a : Tuple = self.encoder['\n']
del self.encoder["\n"]
a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
                    index = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : List[int] = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 )) | 297 | 0 |
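# Aside: the WordpieceTokenizer above does greedy longest-match-first segmentation.
# A toy run with a hypothetical vocab shows the behaviour:
toy = WordpieceTokenizer(vocab={'un', 'aff', 'able', 'unaff'}, unk_token='<unk>')
assert toy.tokenize('unaffable') == ['unaff', 'able']       # longest prefix wins
assert toy.tokenize('xyz') == ['<unk>', '<unk>', '<unk>']   # no match -> per-char unk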
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
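# Aside: quick usage of the PrefixSum class above. get_sum(i, j) answers inclusive
# range-sum queries in O(1) after the O(n) precomputation, and contains_sum(t)
# detects whether any contiguous subarray sums to t:
ps = PrefixSum([1, 2, 3, 4])
assert ps.get_sum(0, 3) == 10
assert ps.get_sum(1, 2) == 5
assert ps.contains_sum(7) is True     # the subarray [3, 4]
assert ps.contains_sum(100) is False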
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Any = TFAutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : str = AutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) | 297 | 0 |
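# Aside: the cross-tests above rely on transformers' ability to load PyTorch weights
# into TF models and vice versa. A minimal sketch of that round trip with a tiny
# checkpoint (the model id and paths are illustrative; from_pt/from_tf are the real
# flags exercised by the tests):
from transformers import BertModel, TFBertModel

pt_model = BertModel.from_pretrained('hf-internal-testing/tiny-random-bert')
pt_model.save_pretrained('/tmp/tiny-bert-pt')
tf_model = TFBertModel.from_pretrained('/tmp/tiny-bert-pt', from_pt=True)
tf_model.save_pretrained('/tmp/tiny-bert-tf')
pt_again = BertModel.from_pretrained('/tmp/tiny-bert-tf', from_tf=True)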
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """simple docstring"""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    """simple docstring"""
    for base_num in range(9_999, 4_999, -1):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 153 |
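# Aside: why the first loop above works. For a 4-digit n, the concatenation of n and
# 2n is n*10**5 + 2*n = 100002*n, so scanning n downward from 9999 finds the largest
# candidate first. Project Euler 38's known maximum comes from n = 9327:
assert 100_002 * 9_327 == 932_718_654
assert is_9_pandigital(932_718_654)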
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig( PretrainedConfig ):
lowercase__ = """roberta"""
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 297 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int):
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas):
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas):
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt, neighbours):
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 4 |
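# Aside: the per-cell Python loops in run()/__judge_point() above can be replaced by a
# vectorized neighbour count. A sketch using np.roll (pure numpy, same B3/S23 rules;
# note np.roll wraps around, giving toroidal edges unlike the slicing in run()):
import numpy as np

def step(grid: np.ndarray) -> np.ndarray:
    neighbours = sum(
        np.roll(np.roll(grid, dy, axis=0), dx, axis=1)
        for dy in (-1, 0, 1) for dx in (-1, 0, 1)
        if (dy, dx) != (0, 0)
    )
    # a cell is born with exactly 3 neighbours, survives with 2 or 3
    return (neighbours == 3) | (grid & (neighbours == 2))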
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root exists between a and b only if
    # equation(a) and equation(b) have opposite signs.
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 297 | 0 |
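# Aside: equation(x) = 10 - x*x has its positive root at sqrt(10) ~= 3.1623, so both
# calls above converge there; the final bracket is narrower than 0.01, which bounds
# the error of the returned midpoint:
import math
assert abs(bisection(-2, 5) - math.sqrt(10)) < 0.01
assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01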
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Jaccard similarity = |A intersection B| / |A union B|; with alternative_union
    the denominator is |A| + |B| instead.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
| 73 |
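# Aside: the printed value above works out by hand: |A intersection B| = |{c, d, e}| = 3
# and |A union B| = 5 + 6 - 3 = 8, so the jaccard similarity is 3/8 = 0.375:
assert jaccard_similarity({'a', 'b', 'c', 'd', 'e'}, {'c', 'd', 'e', 'f', 'h', 'i'}) == 0.375
# with alternative_union=True the denominator is |A| + |B| = 11, giving 3/11 instead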
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4)) | 297 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 296 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states) | 297 | 0 |
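# --- Illustrative sketch (not part of the original dataset row) ---
# DualTransformer2DModel.forward above blends the two transformers' residuals
# with a convex combination controlled by mix_ratio. The same arithmetic on
# plain tensors (no diffusers dependency; the values are made up):
import torch

encoded_states = [torch.full((1, 4), 2.0), torch.full((1, 4), 6.0)]
mix_ratio = 0.5
blended = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio)
print(blended)  # tensor([[4., 4., 4., 4.]])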
"""simple docstring"""
_UpperCamelCase: Optional[Any] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 255 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 297 | 0 |
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, spaces and lower-casing normalization."""

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # BaseTokenizer can't set the unk id after construction, so patch the serialized model.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json)) | 210 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 297 | 0 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : Optional[int] ) -> Optional[Any]:
lowercase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowercase_ : Tuple = -1
lowercase_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowercase_ : Any = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowercase_ : int = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase_ : str = TextStreamer(__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase_ : List[str] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def A ( self : Union[str, Any] ) -> str:
lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowercase_ : Tuple = -1
lowercase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowercase_ : List[str] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowercase_ : int = tokenizer.decode(greedy_ids[0] )
lowercase_ : List[Any] = TextIteratorStreamer(__snake_case )
lowercase_ : List[Any] = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowercase_ : Dict = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
lowercase_ : Dict = ''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__snake_case , __snake_case )
def A ( self : Any ) -> Union[str, Any]:
lowercase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowercase_ : List[Any] = -1
lowercase_ : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowercase_ : List[str] = model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case )
lowercase_ : int = greedy_ids[:, input_ids.shape[1] :]
lowercase_ : Dict = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase_ : List[Any] = TextStreamer(__snake_case , skip_prompt=__snake_case )
model.generate(__snake_case , max_new_tokens=10 , do_sample=__snake_case , streamer=__snake_case )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase_ : Union[str, Any] = cs.out[:-1]
self.assertEqual(__snake_case , __snake_case )
def A ( self : Union[str, Any] ) -> Union[str, Any]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowercase_ : int = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__snake_case )
lowercase_ : Optional[Any] = -1
lowercase_ : str = torch.ones((1, 5) , device=__snake_case ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase_ : List[Any] = TextStreamer(__snake_case , skip_special_tokens=__snake_case )
model.generate(__snake_case , max_new_tokens=1 , do_sample=__snake_case , streamer=__snake_case )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase_ : Optional[Any] = cs.out[:-1] # Remove the final "\n"
lowercase_ : Tuple = tokenizer(__snake_case , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def A ( self : Any ) -> Tuple:
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowercase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__snake_case )
lowercase_ : Optional[Any] = -1
lowercase_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__snake_case )
lowercase_ : Union[str, Any] = TextIteratorStreamer(__snake_case , timeout=0.001 )
lowercase_ : Dict = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowercase_ : List[Any] = Thread(target=model.generate , kwargs=__snake_case )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__snake_case ):
lowercase_ : Optional[int] = ''
for new_text in streamer:
streamer_text += new_text
| 33 |
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, i.e. include the space before them.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs | 297 | 0 |
def apply_table(inp, table):
    """Apply the given permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # First and last bits select the row, the middle bits the column.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sb, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 28 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image() | 297 | 0 |
def is_even(number: int) -> bool:
    """
    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 184 |
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """Wraps a DeepSpeed config dict or file and answers simple queries about it."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload


class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs | 297 | 0 |
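# --- Illustrative sketch (not part of the original dataset row) ---
# Dotted-key lookup as implemented by HfDeepSpeedConfig above; the config
# dict is made up, and running this assumes the snippet's accelerate imports
# resolve in your environment.
ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
hf_ds_config = HfDeepSpeedConfig(ds_config)
assert hf_ds_config.get_value("zero_optimization.stage") == 3
assert hf_ds_config.is_zero3()
assert hf_ds_config.is_offload()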
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 225 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs) | 297 | 0 |
import sys


def matrix_chain_order(array):
    """Return the minimum-multiplication table and the split-point table."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal way to parenthesize the chain."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
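# Added illustrative check (my addition, not part of the original file): for
# dimensions [10, 20, 30] there is a single product A1(10x20) @ A2(20x30),
# which costs 10 * 20 * 30 = 6000 scalar multiplications.
#
#     matrix, sol = matrix_chain_order([10, 20, 30])
#     assert matrix[1][2] == 6000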
| 101 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize the dataset and yield one prompt per task copy."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='pt')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence contains an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 297 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
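# Added usage sketch (my addition, not part of the original file): with the
# defaults above, instantiating the config exposes the audio-specific fields.
#
#     config = ASTConfig()
#     assert config.num_mel_bins == 128 and config.patch_size == 16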
| 153 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
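# Added note (not in the original file): slerp interpolates along the arc
# between v0 and v1; for nearly parallel vectors (|dot| > DOT_THRESHOLD) it
# falls back to plain linear interpolation, so slerp(0.5, v, v) returns v.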
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size['shortest_edge']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split('<end_of_text>')[0].replace('<start_of_text>', '').rstrip(' .,')
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ('model', self.coca_model is None),
            ('tokenizer', self.coca_tokenizer is None),
            ('transform', self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ', '.join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors='pt',
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors='pt',
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs['offset'] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([''], padding='max_length', max_length=max_length, return_tensors='pt')
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        # check if the scheduler accepts generator
        accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs['generator'] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None) | 297 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
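# Added note (my assumption, not from the original file): `inputs` is what the
# ONNX exporter reads to mark dynamic axes, e.g.
#
#     RobertaOnnxConfig(RobertaConfig()).inputs
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])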
| 4 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
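# Added worked check (not from the original file): at redshift 0 every
# (redshift + 1) factor is 1 and curvature = 1 - (O_r + O_m + O_de), so the
# bracketed sum collapses to exactly 1 and the function returns
# hubble_constant itself, which is why the demo below prints 68.3.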
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 297 | 0 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Tuple ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Tuple):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Tuple ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : int):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Optional[int] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Tuple):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Union[str, Any] ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : List[Any] ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : str):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : str ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : str):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Tuple ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : List[str]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : List[str] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : int ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : str ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : int):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Tuple ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : str ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : str):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : str ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Dict):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : List[Any] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : List[Any] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : List[str]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : int):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : List[Any] ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Dict):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : int ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : int):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : str):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : List[Any] ,*SCREAMING_SNAKE_CASE__ : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Optional[Any] ,**SCREAMING_SNAKE_CASE__ : List[str]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Any ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Dict):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Tuple ,*SCREAMING_SNAKE_CASE__ : Dict ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Optional[int] ,*SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Tuple):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Optional[Any] ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(cls ,['flax'])
class A_ ( metaclass=DummyObject ):
    _backends = ['flax']
def __init__( self : Optional[int] ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : str):
requires_backends(self ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Dict ,*SCREAMING_SNAKE_CASE__ : List[Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
requires_backends(cls ,['flax'])
@classmethod
def lowerCAmelCase ( cls : Tuple ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Any):
requires_backends(cls ,['flax'])
| 73 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to the range [a, b], pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over the range [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 297 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            "The device_map was not initialized."
            "Setting device_map to `{'':torch.cuda.current_device()}`."
        )

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside the model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if a `fp16_statistics` is passed, the weights were already quantized upstream
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
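# Added usage sketch (my assumption, not part of the original file):
#
#     with init_empty_weights():
#         model = MyModel(config)  # hypothetical model class
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     model = load_and_quantize_model(
#         model, bnb_config, weights_location=checkpoint_dir, device_map="auto"
#     )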
| 296 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators."""
    while second != 0:
        # carry holds the common set bits of first and second
        carry = first & second
        first ^= second
        second = carry << 1
    return first
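# Added trace (not in the original file), for add(3, 5):
#   011 ^ 101 = 110, carry 001 << 1 = 010
#   110 ^ 010 = 100, carry 010 << 1 = 100
#   100 ^ 100 = 000, carry 100 << 1 = 1000
#   000 ^ 1000 = 1000 -> 8, and a zero carry ends the loop.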
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(F"{add(first, second) = }") | 297 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u(u-1)(u-2)...(u-p+1) for the Newton forward-difference term."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
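# Added check (not in the original file): ucal(1, 2) = 1 * (1 - 1) = 0 and
# ucal(5, 3) = 5 * 4 * 3 = 60, matching the falling-product definition above.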
def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))

    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())

    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 255 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
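# Added note (my assumption, not part of the original file): the `jsonl_path`
# and `jsonl_312_path` arguments used below are pytest fixtures expected to
# point at small JSON Lines files carrying the col_1/col_2/col_3 columns
# asserted above.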
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
        {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
        {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
        {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
    ] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    'features' , [
        None,
        {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
    ] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
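    # Illustration (not from the original test file) of the pandas-style `orient`
    # layouts asserted above, for a two-row table with columns a/b:
    #     records: [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
    #     split:   {'columns': ['a', 'b'], 'data': [[1, 2], [3, 4]]}
    #     index:   {'0': {'a': 1, 'b': 2}, '1': {'a': 3, 'b': 4}}
    #     columns: {'a': {'0': 1, '1': 3}, 'b': {'0': 2, '1': 4}}
    #     values:  [[1, 2], [3, 4]]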
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp('data') / f'test.json.{extension}'
        original_path = str(shared_datadir / f'test_file.json.{extension}')
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, 'rb', compression='infer') as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer') as f:
            original_content = f.read()
        assert exported_content == original_content
| 210 |
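# Minimal usage sketch (not part of the original test file) of the round trip the
# tests above exercise, via the public `datasets` API; 'data.jsonl' is a placeholder path:
#     from datasets import load_dataset
#     ds = load_dataset('json', data_files='data.jsonl', split='train')
#     ds.to_json('exported.jsonl', lines=True)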
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
"""simple docstring"""
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
__a : List[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__a : Tuple = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__a : Tuple = False
__a : Any = False
__a : Any = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict['output_hidden_states'] = True
        inputs_dict['output_attentions'] = True

        if hasattr(config, 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, '''decoder_seq_length''', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, '''encoder_seq_length''', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, '''key_length''', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, '''key_length''', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 33 |
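# Note (illustrative, not from the original file): the `num_attention_heads / 2` in the
# attention-shape assertions above reflects ConvBERT's head_ratio (2 in this tester):
# half of the heads are replaced by span-based dynamic convolution, so the returned
# attention maps carry num_attention_heads // head_ratio self-attention heads, e.g.:
#     expected_attention_shape = (13, 4 // 2, 7, 7)  # (batch, heads // head_ratio, seq, seq)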
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div', attrs={'class': 's-result-item', 'data-component-type': 's-search-result'}, ), soup.find_all('div', attrs={'class': 'a-row a-size-base a-color-base'}), ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100 )
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame['Current Price of the product'] == '', 'Current Price of the product'
        ] = ' '
        data_frame.loc[
            data_frame['MRP of the product'] == '', 'MRP of the product'
        ] = ' '
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
lowerCAmelCase: str = 'headphones'
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv") | 297 | 0 |
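# Worked example (illustrative) of the discount column computed above,
# discount = (MRP - price) / MRP * 100. For an MRP of ₹1,000 and a price of ₹750:
#     (1000.0 - 750.0) / 1000.0 * 100  # -> 25.0 (% off)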
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
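# Hypothetical direct call of the converter above (the paths are placeholders,
# not from the original script):
#     convert_tf_checkpoint_to_pytorch('bert_model.ckpt', 'bert_config.json', 'pytorch_model.bin')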
| 33 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
a : List[Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL()
a : str = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, 'anime turtle', num_inference_steps=2, output_type='np')

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 297 | 0 |
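# Note (illustrative, not from the original test): attention slicing and sequential CPU
# offload, enabled above, are generic diffusers memory levers:
#     pipe.enable_attention_slicing()       # compute attention in smaller slices
#     pipe.enable_sequential_cpu_offload()  # keep submodules on CPU until they are needed
# Both trade speed for peak VRAM, which is what the < 7 GB assertion checks.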
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
_SCREAMING_SNAKE_CASE = jnp.floataa
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class SCREAMING_SNAKE_CASE ( nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = (
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""DownBlock2D""",
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = (320, 640, 1_280, 1_280)
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = 1_280
_SCREAMING_SNAKE_CASE = 0.0
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = jnp.floataa
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = """rgb"""
_SCREAMING_SNAKE_CASE = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='VALID', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale: float = 1.0, return_dict: bool = True, train: bool = False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
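# Note (illustrative, not from the original module): in "6. scaling" above, a
# conditioning_scale of 0.0 zeroes every ControlNet residual (disabling the control
# signal entirely), while 1.0 passes the residuals through unmodified, e.g.:
#     mid_block_res_sample * 0.5  # half-strength control signal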
| 28 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase: List[str] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """t5"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
class a__( lowerCamelCase__ ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs
@property
    def default_onnx_opset(self) -> int:
        return 13
| 297 | 0 |
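# Worked examples (illustrative) of the `feed_forward_proj` parsing in T5Config above:
#     'relu'       -> dense_act_fn='relu', is_gated_act=False
#     'gated-gelu' -> dense_act_fn='gelu', is_gated_act=True (then remapped to 'gelu_new')
#     'gated-silu' -> dense_act_fn='silu', is_gated_act=True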
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
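# Usage sketch (illustrative): Newton's iteration x_{n+1} = x_n - f(x_n) / f'(x_n)
# with f(x) = x**2 - a converges to sqrt(a), e.g.:
#     square_root_iterative(4)    # ~2.0
#     square_root_iterative(3.2)  # ~1.7888543819998317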
| 184 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    if inductance <= 0:
        raise ValueError('Inductance cannot be 0 or negative')
    elif capacitance <= 0:
        raise ValueError('Capacitance cannot be 0 or negative')
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 | 0 |
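# Worked example (illustrative): f = 1 / (2 * pi * sqrt(L * C)). For L = 10 mH and
# C = 100 nF, L * C = 1e-9, so f = 1 / (2 * pi * 3.1623e-5) ≈ 5032.9 Hz:
#     resonant_frequency(10e-3, 100e-9)  # ('Resonant frequency', 5032.92...)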
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = MODEL_FOR_CAUSAL_LM_MAPPING
lowercase_ = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase_ ( self : Any ):
        text_generator = pipeline(task='text-generation', model='sshleifer/tiny-ctrl', framework='pt')
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_ = text_generator('This is a test' , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
SCREAMING_SNAKE_CASE_ = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
__snake_case , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
SCREAMING_SNAKE_CASE_ = text_generator('This is a test' , do_sample=__snake_case , num_return_sequences=2 , return_tensors=__snake_case )
self.assertEqual(
__snake_case , [
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
] , )
SCREAMING_SNAKE_CASE_ = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_ = '<pad>'
SCREAMING_SNAKE_CASE_ = text_generator(
['This is a test', 'This is a second test'] , do_sample=__snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=__snake_case , )
self.assertEqual(
__snake_case , [
[
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
],
[
{'generated_token_ids': ANY(__snake_case )},
{'generated_token_ids': ANY(__snake_case )},
],
] , )
@require_tf
def lowerCAmelCase_ ( self : Union[str, Any] ):
        text_generator = pipeline(task='text-generation', model='sshleifer/tiny-ctrl', framework='tf')
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_ = text_generator('This is a test' , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
SCREAMING_SNAKE_CASE_ = text_generator(['This is a test', 'This is a second test'] , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = 'Hello I believe in'
        text_generator = pipeline('text-generation', model='hf-internal-testing/tiny-random-gpt2')
        outputs = text_generator(prompt)
        self.assertEqual(
            outputs, [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}], )

        outputs = text_generator(prompt, stop_sequence=' fe')
        self.assertEqual(outputs, [{'generated_text': 'Hello I believe in fe'}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))

        outputs = text_generator('This is a test', return_full_text=False)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])

        text_generator = pipeline(task='text-generation', model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator('This is a test')
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertNotIn('This is a test', outputs[0]['generated_text'])

        outputs = text_generator('This is a test', return_full_text=True)
        self.assertEqual(outputs, [{'generated_text': ANY(str)}])
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test'))
SCREAMING_SNAKE_CASE_ = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE_ = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=__snake_case )
self.assertEqual(
__snake_case , [
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
[{'generated_text': ANY(__snake_case )}, {'generated_text': ANY(__snake_case )}],
] , )
with self.assertRaises(__snake_case ):
SCREAMING_SNAKE_CASE_ = text_generator('test' , return_full_text=__snake_case , return_text=__snake_case )
with self.assertRaises(__snake_case ):
SCREAMING_SNAKE_CASE_ = text_generator('test' , return_full_text=__snake_case , return_tensors=__snake_case )
with self.assertRaises(__snake_case ):
SCREAMING_SNAKE_CASE_ = text_generator('test' , return_text=__snake_case , return_tensors=__snake_case )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE_ = text_generator('' )
self.assertEqual(__snake_case , [{'generated_text': ANY(__snake_case )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE_ = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE_ = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__snake_case ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase_ ( self : Union[str, Any] ):
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model='hf-internal-testing/tiny-random-bloom', model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe('This is a test')
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe('This is a test')
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto')
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe('This is a test')
self.assertEqual(
__snake_case , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase_ ( self : List[Any] ):
import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device=0, torch_dtype=torch.float16)
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase_ ( self : List[str] ):
import torch
        pipe = pipeline(model='hf-internal-testing/tiny-random-bloom', device_map='auto', torch_dtype=torch.float16)
pipe('This is a test' , do_sample=__snake_case , top_p=0.5 )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = 'Hello world'
SCREAMING_SNAKE_CASE_ = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE_ = logging.get_logger('transformers.generation.tf_utils' )
else:
SCREAMING_SNAKE_CASE_ = logging.get_logger('transformers.generation.utils' )
        SCREAMING_SNAKE_CASE_ = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__snake_case ) as cl:
SCREAMING_SNAKE_CASE_ = text_generator(__snake_case , max_length=10 , max_new_tokens=1 )
self.assertIn(__snake_case , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__snake_case ) as cl:
SCREAMING_SNAKE_CASE_ = text_generator(__snake_case , max_new_tokens=1 )
self.assertNotIn(__snake_case , cl.out )
with CaptureLogger(__snake_case ) as cl:
SCREAMING_SNAKE_CASE_ = text_generator(__snake_case , max_length=10 )
self.assertNotIn(__snake_case , cl.out ) | 225 |
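# Toy illustration of the interplay the captured warning above flags. That max_new_tokens
# takes precedence over max_length when both are passed is an inference from the warning
# text, not something these tests assert directly.
prompt_len = 4
max_length, max_new_tokens = 10, 1
print(prompt_len + max_new_tokens)  # 5 tokens total, not max_length's 10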
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase: str = {
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def load_vocab ( _A ):
    vocab = collections.OrderedDict()
    with open(_A , 'r' , encoding='utf-8' ) as reader:
        lines = reader.readlines()
    for index, token in enumerate(lines ):
        token = token.rstrip('\n' )
        vocab[token] = index
return vocab
class WordpieceTokenizer( object ):
    def __init__( self : Dict , vocab : Any , unk_token : Dict="<unk>" , max_input_chars_per_word : str=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self : Optional[int] , __snake_case : Union[str, Any] ):
        chars = list(__snake_case )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = False
    def __init__( self : Any , vocab_file : str , bod_token : Tuple="<d>" , eod_token : List[str]="</d>" , bos_token : Dict="<s>" , eos_token : List[Any]="</s>" , pad_token : int="<pad>" , unk_token : Any="<unk>" , line_token : List[str]="</n>" , space_token : int="</_>" , padding_side : Optional[Any]="left" , **kwargs : Dict , ):
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase_ ( self : Optional[int] ):
return self.encoder[self.bod_token]
@property
def lowercase_ ( self : Dict ):
return self.encoder[self.eod_token]
@property
def lowercase_ ( self : Any ):
return self.encoder["\n"]
@property
def lowercase_ ( self : Tuple ):
return len(self.encoder )
def lowercase_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
    def lowercase_ ( self : Union[str, Any] , text : List[str] ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def lowercase_ ( self : Union[str, Any] , token_ids : Optional[Any] , **kwargs : Optional[Any] ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
def lowercase_ ( self : Optional[int] , __snake_case : int ):
        return __snake_case in self.encoder
def lowercase_ ( self : int , __snake_case : List[str] ):
return "".join(__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
return self.decoder.get(__snake_case , self.unk_token )
    def lowercase_ ( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)
    def lowercase_ ( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : List[int] = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def lowercase_ ( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 )) | 297 | 0 |
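# Standalone sketch of the greedy longest-match loop implemented by WordpieceTokenizer
# above; the three-entry vocabulary is an invented toy, not part of the CPM-Ant vocab.
def wordpiece_tokenize(word, vocab, unk_token='<unk>'):
    chars = list(word)
    sub_tokens = []
    start = 0
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:  # shrink the window from the right until it hits the vocab
            if ''.join(chars[start:end]) in vocab:
                cur_substr = ''.join(chars[start:end])
                break
            end -= 1
        if cur_substr is None:
            sub_tokens.append(unk_token)
            start += 1
        else:
            sub_tokens.append(cur_substr)
            start = end
    return sub_tokens

print(wordpiece_tokenize('unhappily', {'un', 'happi', 'ly'}))  # ['un', 'happi', 'ly']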
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read ( bpayload , sampling_rate ):
    '''simple docstring'''
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
def ffmpeg_microphone ( sampling_rate , chunk_length_s , format_for_conversion = "f32le" , ):
    '''simple docstring'''
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command , chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live ( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter ( iterator , chunk_len , stride , stream = False ):
    '''simple docstring'''
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream ( ffmpeg_command , buflen ):
    '''simple docstring'''
    bufsize = 2**24  # 16MB
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 101 |
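# Minimal sketch of the decode step ffmpeg_read performs above: raw little-endian float32
# PCM bytes become a 1-D numpy waveform. The payload here is synthesized in-process as a
# stand-in for ffmpeg's stdout.
import numpy as np

raw = np.array([0.0, 0.5, -0.5, 1.0], dtype=np.float32).tobytes()
audio = np.frombuffer(raw, dtype=np.float32)
print(audio.shape, audio.dtype)  # (4,) float32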
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a = TFAutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a = AutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a = TFAutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a = AutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
            a , a = TFAutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
            a , a = AutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
        model = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
        model = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) | 297 | 0 |
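# Usage sketch for the chunk_bytes_iter helper above (the same function ships as
# transformers.pipelines.audio_utils.chunk_bytes_iter, to the best of my knowledge):
# 8-byte windows overlapping by 2 bytes on each side, fed from a toy in-memory source
# instead of a microphone stream.
from transformers.pipelines.audio_utils import chunk_bytes_iter

for item in chunk_bytes_iter(iter([b'0123456789'] * 3), chunk_len=8, stride=(2, 2)):
    print(len(item['raw']), item['stride'])  # first stride is (0, 2), the final one (2, 0)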
"""simple docstring"""
lowerCAmelCase__ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 153 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """roberta"""
def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
a : List[str] = vocab_size
a : str = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = hidden_act
a : str = intermediate_size
a : Union[str, Any] = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Optional[int] = type_vocab_size
a : str = initializer_range
a : List[Any] = layer_norm_eps
a : Optional[int] = position_embedding_type
a : Dict = use_cache
a : Any = classifier_dropout
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : int ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 297 | 0 |
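# Hedged sketch of how a name -> pip-specifier table like the one above is typically
# consumed. deps_table is a tiny subset copied from that table; deps_list mirrors the
# spirit of the usual setup.py helper and is not quoted from transformers itself.
deps_table = {
    'numpy': 'numpy>=1.17',
    'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
    'torch': 'torch>=1.9,!=1.12.0',
}

def deps_list(*pkgs):
    missing = [p for p in pkgs if p not in deps_table]
    if missing:
        raise ValueError(f'unknown dependencies: {missing}')
    return [deps_table[p] for p in pkgs]

print(deps_list('torch', 'tokenizers'))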
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowerCamelCase : List[Any] = StableDiffusionXLImgaImgPipeline
lowerCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
lowerCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCAmelCase ( self : str ) -> List[str]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
        scheduler = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__snake_case )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__snake_case )
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple=0 ) -> int:
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
        image = image / 2 + 0.5
        if str(__snake_case ).startswith('mps' ):
            generator = torch.manual_seed(__snake_case )
        else:
            generator = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=__snake_case )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Any ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : int ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : int ) -> Tuple:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(__snake_case )
        sd_pipe = sd_pipe.to(__snake_case )
        sd_pipe.set_progress_bar_config(disable=__snake_case )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(__snake_case )
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        output = sd_pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(__snake_case )
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def __UpperCAmelCase ( self : Union[str, Any] , device : List[str] , generator_device : Union[str, Any]="cpu" , dtype : Any=torch.float32 , seed : Optional[Any]=0 ) -> Dict:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 6_4, 6_4) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        inputs = self.get_inputs(__snake_case )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 4 |
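# Sketch of the reproducibility pattern the pipeline tests above rely on: a seeded
# torch.Generator makes the sampled noise, and hence the diffusion output, deterministic.
import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
print(torch.equal(torch.randn(2, 2, generator=gen_a), torch.randn(2, 2, generator=gen_b)))  # True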
'''simple docstring'''
def equation ( x ):
    return 10 - x * x
def bisection ( a , b ):
    # Bolzano theorem: a sign change of `equation` on [a, b] guarantees a root in between
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 297 | 0 |
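# Quick check of the convergence rate behind the 0.01 tolerance above: bisection halves
# the bracket every step, so it needs about log2((b - a) / tol) iterations.
import math

a, b, tol = 0.0, 6.0, 0.01
steps = math.ceil(math.log2((b - a) / tol))
print(steps, (b - a) / 2**steps)  # 10 halvings leave a bracket of ~0.0059 < 0.01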
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester( ConfigTester ):
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Dict = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__snake_case ,'hidden_sizes'))
self.parent.assertTrue(hasattr(__snake_case ,'neck_hidden_sizes'))
self.parent.assertTrue(hasattr(__snake_case ,'num_attention_heads'))
class MobileViTModelTester :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_3 ,SCREAMING_SNAKE_CASE__ : Dict=3_2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,SCREAMING_SNAKE_CASE__ : Any=6_4_0 ,SCREAMING_SNAKE_CASE__ : Any=4 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="silu" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : int=0.1 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : int=1_0 ,SCREAMING_SNAKE_CASE__ : Tuple=None ,):
__lowerCamelCase : Dict = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Any = image_size
__lowerCamelCase : List[str] = patch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : List[Any] = last_hidden_size
__lowerCamelCase : Optional[int] = num_attention_heads
__lowerCamelCase : List[str] = hidden_act
__lowerCamelCase : Dict = conv_kernel_size
__lowerCamelCase : Union[str, Any] = output_stride
__lowerCamelCase : str = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = classifier_dropout_prob
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : Tuple = is_training
__lowerCamelCase : Optional[Any] = num_labels
__lowerCamelCase : Union[str, Any] = initializer_range
__lowerCamelCase : str = scope
def lowerCAmelCase ( self : Optional[Any]):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels)
        config = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : Union[str, Any]):
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
        model = MobileViTModel(config=__snake_case)
        model.to(__snake_case)
        model.eval()
        result = model(__snake_case)
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : int = self.num_labels
        model = MobileViTForImageClassification(__snake_case)
model.to(__snake_case)
model.eval()
        result = model(__snake_case ,labels=__snake_case)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : int = self.num_labels
        model = MobileViTForSemanticSegmentation(__snake_case)
model.to(__snake_case)
model.eval()
        result = model(__snake_case)
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
        result = model(__snake_case ,labels=__snake_case)
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def lowerCAmelCase ( self : int):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
_UpperCAmelCase : List[str] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Tuple = False
def lowerCAmelCase ( self : Any):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case)
def lowerCAmelCase ( self : Union[str, Any]):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds')
def lowerCAmelCase ( self : Optional[int]):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings')
def lowerCAmelCase ( self : List[Any]):
pass
@unittest.skip(reason='MobileViT does not output attentions')
def lowerCAmelCase ( self : Optional[int]):
pass
def lowerCAmelCase ( self : str):
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            model = model_class(__snake_case)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self : Tuple):
pass
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case)
def lowerCAmelCase ( self : Optional[int]):
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
            model = model_class(__snake_case)
            model.to(__snake_case)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(__snake_case ,__snake_case))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states) ,expected_num_stages)
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
            divisor = 2
for i in range(len(__snake_case)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(__snake_case ,__snake_case ,__snake_case)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(__snake_case ,__snake_case ,__snake_case)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case)
@slow
def lowerCAmelCase ( self : Optional[Any]):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Tuple = MobileViTModel.from_pretrained(__snake_case)
self.assertIsNotNone(__snake_case)
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
__lowerCamelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : Optional[Any]):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Dict = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(__snake_case)
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : List[str] = prepare_img()
__lowerCamelCase : Any = image_processor(images=__snake_case ,return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
__lowerCamelCase : int = model(**__snake_case)
# verify the logits
__lowerCamelCase : Dict = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,__snake_case)
__lowerCamelCase : List[str] = torch.tensor([-1.9364, -1.2327, -0.4653]).to(__snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4))
@slow
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
__lowerCamelCase : Optional[Any] = model.to(__snake_case)
__lowerCamelCase : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
__lowerCamelCase : int = prepare_img()
__lowerCamelCase : List[str] = image_processor(images=__snake_case ,return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
__lowerCamelCase : Any = model(**__snake_case)
__lowerCamelCase : List[str] = outputs.logits
# verify the logits
__lowerCamelCase : Optional[Any] = torch.Size((1, 2_1, 3_2, 3_2))
self.assertEqual(logits.shape ,__snake_case)
__lowerCamelCase : Any = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] ,device=__snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__snake_case ,atol=1E-4))
@slow
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
__lowerCamelCase : List[str] = model.to(__snake_case)
__lowerCamelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
__lowerCamelCase : Optional[Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=__snake_case ,return_tensors='pt').to(__snake_case)
# forward pass
with torch.no_grad():
__lowerCamelCase : Tuple = model(**__snake_case)
__lowerCamelCase : Optional[Any] = outputs.logits.detach().cpu()
__lowerCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=__snake_case ,target_sizes=[(5_0, 6_0)])
__lowerCamelCase : Optional[Any] = torch.Size((5_0, 6_0))
self.assertEqual(segmentation[0].shape ,__snake_case)
__lowerCamelCase : str = image_processor.post_process_semantic_segmentation(outputs=__snake_case)
__lowerCamelCase : int = torch.Size((3_2, 3_2))
self.assertEqual(segmentation[0].shape ,__snake_case)
| 73 |
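# Worked check of the shape bookkeeping in the MobileViT hidden-states test above: each of
# the 5 stages halves height and width, so with the tester's image_size of 32 the feature
# maps shrink from 16x16 down to 1x1, matching the expected output_stride of 32.
image_size, num_stages = 32, 5
sides = [image_size // 2 ** (i + 1) for i in range(num_stages)]
print(sides)                    # [16, 8, 4, 2, 1]
print(image_size // sides[-1])  # 32 == output_stride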
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ):
a : Tuple = parent
a : List[str] = batch_size
a : Optional[Any] = seq_length
a : Tuple = is_training
a : Optional[Any] = use_input_mask
a : List[Any] = use_token_type_ids
a : List[Any] = use_labels
a : int = vocab_size
a : Union[str, Any] = hidden_size
a : Any = num_hidden_layers
a : List[str] = num_attention_heads
a : int = intermediate_size
a : str = hidden_act
a : Tuple = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : List[str] = max_position_embeddings
a : Any = type_vocab_size
a : List[str] = type_sequence_label_size
a : Union[str, Any] = initializer_range
a : Optional[int] = num_labels
a : Optional[Any] = num_choices
a : Optional[int] = scope
def lowercase_ ( self : List[Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] ):
        config = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ):
        model = EsmForProteinFolding(config=__snake_case ).float()
        model.to(__snake_case )
        model.eval()
        result = model(__snake_case , attention_mask=__snake_case )
        result = model(__snake_case )
        result = model(__snake_case )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase_ ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = False
lowercase__ = (EsmForProteinFolding,) if is_torch_available() else ()
lowercase__ = ()
lowercase__ = {} if is_torch_available() else {}
lowercase__ = False
def lowercase_ ( self : int ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def lowercase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip('Does not support attention outputs' )
def lowercase_ ( self : str ):
pass
@unittest.skip
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold only has one output format.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowercase_ ( self : Tuple ):
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@require_torch
class a__( lowerCamelCase__ ):
@slow
def lowercase_ ( self : Optional[int] ):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4 ) ) | 297 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file ( _SCREAMING_SNAKE_CASE ) -> List[str]:
    '''simple docstring'''
    with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
        lines = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCamelCase__ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : Tuple = PRETRAINED_VOCAB_FILES_MAP
__snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = ["input_ids", "attention_mask"]
    def __init__( self : Any ,vocab_file : List[Any] ,unk_token : Optional[int]="<unk>" ,cls_token : int="<cls>" ,pad_token : Dict="<pad>" ,mask_token : str="<mask>" ,eos_token : str="<eos>" ,**kwargs : List[str] ,) -> str:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : int ) -> int:
'''simple docstring'''
return self._id_to_token.get(__snake_case ,self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : str ) -> str:
'''simple docstring'''
return self._token_to_id.get(__snake_case ,self._token_to_id.get(self.unk_token ) )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,text : str ,**kwargs : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        return text.split()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
return len(self._id_to_token )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
return self._token_to_id.get(__snake_case ,self._token_to_id.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ,lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
return self._id_to_token.get(__snake_case ,self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : List ,lowerCamelCase__ : Optional[List] = None ,lowerCamelCase__ : bool = False ) -> Optional[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
SCREAMING_SNAKE_CASE = [1] + ([0] * len(__snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(__snake_case ) + [1]
return mask
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.path.join(__snake_case ,(filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" )
with open(__snake_case ,"""w""" ) as f:
f.write("""\n""".join(self.all_tokens ) )
return (vocab_file,)
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=__snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ,lowerCamelCase__ : Union[List[str], List[AddedToken]] ,lowerCamelCase__ : bool = False ) -> List[Any]:
'''simple docstring'''
return super()._add_tokens(__snake_case ,special_tokens=__snake_case )
| 296 |
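
# A minimal sketch of driving the vocabulary-file tokenizer defined above. The
# class is transformers' EsmTokenizer under renamed identifiers, so this uses
# the public checkpoint name (assuming it can be downloaded) rather than the
# local class. Every vocab token is registered as a no-split token, which is
# what lets a raw protein string split into per-residue tokens.
from transformers import EsmTokenizer

tok = EsmTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
enc = tok('MKTV')  # each residue becomes one token via the no-split trie
print(enc['input_ids'])  # [cls_id, M, K, T, V, eos_id]
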
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__( nn.Module ):
def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ):
super().__init__()
a : Optional[int] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a : Union[str, Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a : Tuple = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a : Any = [1, 0]
def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Dict=None , __snake_case : bool = True , ):
a : Dict = hidden_states
a : Tuple = []
a : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
a : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a : Tuple = self.transformer_index_for_condition[i]
a : Union[str, Any] = self.transformers[transformer_index](
__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , return_dict=__snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
a : Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a : int = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=__snake_case )
| 297 | 0 |
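
# The blending step in the forward pass above is a plain linear interpolation
# between the two transformers' residuals, added back onto the input. A tiny
# numeric sketch of that formula with made-up tensors:
import torch

mix_ratio = 0.5
encoded_0 = torch.tensor([1.0, 2.0])   # residual from transformers[0]
encoded_1 = torch.tensor([3.0, 4.0])   # residual from transformers[1]
input_states = torch.tensor([0.5, 0.5])

output = encoded_0 * mix_ratio + encoded_1 * (1 - mix_ratio) + input_states
print(output)  # tensor([2.5000, 3.5000])
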
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_UpperCamelCase: Dict = logging.get_logger(__name__)
_UpperCamelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCamelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_UpperCamelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__ ( lowerCamelCase__ ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = LEDTokenizer
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self : List[Any], lowerCAmelCase : Optional[Any]=None, lowerCAmelCase : List[str]=None, lowerCAmelCase : Tuple=None, lowerCAmelCase : Dict="replace", lowerCAmelCase : int="<s>", lowerCAmelCase : Any="</s>", lowerCAmelCase : Optional[Any]="</s>", lowerCAmelCase : Optional[Any]="<s>", lowerCAmelCase : Optional[Any]="<unk>", lowerCAmelCase : List[str]="<pad>", lowerCAmelCase : int="<mask>", lowerCAmelCase : int=False, lowerCAmelCase : str=True, **lowerCAmelCase : Tuple, ) -> Optional[Any]:
super().__init__(
__snake_case, __snake_case, tokenizer_file=__snake_case, errors=__snake_case, bos_token=__snake_case, eos_token=__snake_case, sep_token=__snake_case, cls_token=__snake_case, unk_token=__snake_case, pad_token=__snake_case, mask_token=__snake_case, add_prefix_space=__snake_case, trim_offsets=__snake_case, **__snake_case, )
lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', __snake_case ) != add_prefix_space:
lowercase : List[Any] = getattr(__snake_case, pre_tok_state.pop('type' ) )
lowercase : Optional[Any] = add_prefix_space
lowercase : Optional[Any] = pre_tok_class(**__snake_case )
lowercase : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : Dict = 'post_processor'
lowercase : int = getattr(self.backend_tokenizer, __snake_case, __snake_case )
if tokenizer_component_instance:
lowercase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Any = tuple(state['sep'] )
if "cls" in state:
lowercase : Any = tuple(state['cls'] )
lowercase : Optional[Any] = False
if state.get('add_prefix_space', __snake_case ) != add_prefix_space:
lowercase : Any = add_prefix_space
lowercase : Optional[Any] = True
if state.get('trim_offsets', __snake_case ) != trim_offsets:
lowercase : List[Any] = trim_offsets
lowercase : Union[str, Any] = True
if changes_to_apply:
lowercase : int = getattr(__snake_case, state.pop('type' ) )
lowercase : List[Any] = component_class(**__snake_case )
setattr(self.backend_tokenizer, __snake_case, __snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase ( self : Dict ) -> Dict:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase ( self : Dict, lowerCAmelCase : List[str] ) -> Union[str, Any]:
lowercase : Tuple = AddedToken(__snake_case, lstrip=__snake_case, rstrip=__snake_case ) if isinstance(__snake_case, __snake_case ) else value
lowercase : Optional[int] = value
def lowercase ( self : Optional[Any], *lowerCAmelCase : Any, **lowerCAmelCase : Union[str, Any] ) -> List[str]:
lowercase : Dict = kwargs.get('is_split_into_words', __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__snake_case, **__snake_case )
def lowercase ( self : Union[str, Any], *lowerCAmelCase : Optional[int], **lowerCAmelCase : List[str] ) -> int:
lowercase : Optional[int] = kwargs.get('is_split_into_words', __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__snake_case, **__snake_case )
def lowercase ( self : Dict, lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> List[Any]:
lowercase : Union[str, Any] = self._tokenizer.model.save(__snake_case, name=__snake_case )
return tuple(__snake_case )
def lowercase ( self : Union[str, Any], lowerCAmelCase : str, lowerCAmelCase : int=None ) -> str:
lowercase : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase ( self : Optional[int], lowerCAmelCase : List[int], lowerCAmelCase : Optional[List[int]] = None ) -> Tuple:
lowercase : int = [self.sep_token_id]
lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase ( self : List[str], lowerCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding], lowerCAmelCase : Optional[int] = None, lowerCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[bool] = None, ) -> Tuple:
lowercase : Optional[Any] = super()._pad(
encoded_inputs=__snake_case, max_length=__snake_case, padding_strategy=__snake_case, pad_to_multiple_of=__snake_case, return_attention_mask=__snake_case, )
# Load from model defaults
if return_attention_mask is None:
lowercase : str = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
lowercase : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
lowercase : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Union[str, Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 255 |
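
# A minimal sketch of the `global_attention_mask` padding behaviour implemented
# in `_pad` above, assuming the public 'allenai/led-base-16384' checkpoint is
# available: `1` marks globally attended tokens and `-1` is the padding value
# used on the right.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
enc = tok(['short text', 'a slightly longer piece of text'])
# give the first token of every sequence global attention
enc['global_attention_mask'] = [[1] + [0] * (len(ids) - 1) for ids in enc['input_ids']]
batch = tok.pad(enc, padding=True)
print(batch['global_attention_mask'])  # the shorter row ends in -1 entries
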
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase: Union[str, Any] = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Any = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 297 | 0 |
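
# A minimal sketch of what the `_LazyModule` indirection above buys: importing
# the package itself is cheap, and the heavy submodules are only imported when
# one of the exported attributes is first touched. Assumes `transformers` is
# installed.
import importlib

speecht5 = importlib.import_module('transformers.models.speecht5')
config_cls = speecht5.SpeechT5Config  # the real import happens at this access
print(config_cls.__name__)
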
from __future__ import annotations
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
__lowercase = len(_A )
    # If row is equal to the size of the board, it means there is a queen in
    # each row of the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_A ):
        # We apply what we learned previously. First we check that the current
        # column is not already in the board (possible_board), because if it
        # is, there is a vertical collision. Then we apply the two diagonal
        # formulas we learned before:
        #
        # 45º: y - x = b or row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already
        # exist in their respective sets
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we
        # continue to the next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # Otherwise we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _A , _A , )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = []
depth_first_search([] , [] , [] , _A , _A )
# Print all the boards
for board in boards:
for column in board:
print(_A )
print('''''' )
print(len(_A ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
    n_queens_solution(4)
| 210 |
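
# A quick numeric check of the two diagonal formulas used in the search above:
# two queens at (row, col) positions share a 45º diagonal iff row - col is
# equal, and a 135º diagonal iff row + col is equal.
queen_a, queen_b = (0, 1), (2, 3)  # same 45º diagonal: 0 - 1 == 2 - 3
print(queen_a[0] - queen_a[1] == queen_b[0] - queen_b[1])  # True
print(queen_a[0] + queen_a[1] == queen_b[0] + queen_b[1])  # False
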
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase: str = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Optional[Any] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 297 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase ( __snake_case : Optional[int] = "" ):
lowercase_ : Tuple = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
lowercase_ : List[str] = BeautifulSoup(requests.get(_A ).text , '''html.parser''' )
lowercase_ : Any = soup.find_all('''td''' , attrs='''titleColumn''' )
lowercase_ : Union[str, Any] = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_A , _A )
}
def lowercase ( __snake_case : Optional[int] = "IMDb_Top_250_Movies.csv" ):
lowercase_ : Optional[Any] = get_imdb_top_aaa_movies()
with open(_A , '''w''' , newline='''''' ) as out_file:
lowercase_ : Dict = csv.writer(_A )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 33 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase: Dict = logging.get_logger(__name__)
lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , )
a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) )
a : Optional[Any] = add_prefix_space
a : Optional[Any] = pre_tok_class(**__snake_case )
a : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a : Dict = 'post_processor'
a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case )
if tokenizer_component_instance:
a : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
a : Any = tuple(state['sep'] )
if "cls" in state:
a : Any = tuple(state['cls'] )
a : Optional[Any] = False
if state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : Any = add_prefix_space
a : Optional[Any] = True
if state.get('trim_offsets' , __snake_case ) != trim_offsets:
a : List[Any] = trim_offsets
a : Union[str, Any] = True
if changes_to_apply:
a : int = getattr(__snake_case , state.pop('type' ) )
a : List[Any] = component_class(**__snake_case )
setattr(self.backend_tokenizer , __snake_case , __snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase_ ( self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , __snake_case : List[str] ):
a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value
a : Optional[int] = value
def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ):
a : Dict = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ):
a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ):
a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : int = [self.sep_token_id]
a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
a : Optional[Any] = super()._pad(
encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
# Load from model defaults
if return_attention_mask is None:
a : str = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a : Union[str, Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
| 297 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __lowerCamelCase ( A__ = "laptop" ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = F"""https://www.amazon.in/laptop/s?k={product}"""
UpperCamelCase = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
UpperCamelCase = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
UpperCamelCase = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
UpperCamelCase = item.ha.text
UpperCamelCase = 'https://www.amazon.in/' + item.ha.a['href']
UpperCamelCase = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
UpperCamelCase = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
UpperCamelCase = 'Not available'
try:
UpperCamelCase = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
UpperCamelCase = ''
try:
UpperCamelCase = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
UpperCamelCase = float('nan' )
except AttributeError:
pass
UpperCamelCase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
UpperCamelCase = ' '
UpperCamelCase = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_lowerCamelCase : str = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 28 |
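
# The discount percentage computed above is just (MRP - price) / MRP * 100
# after stripping the currency symbol and the thousands separators. A worked
# example with made-up values:
product_mrp = '₹1,000'
product_price = '₹750'
mrp = float(product_mrp.strip('₹').replace(',', ''))
price = float(product_price.strip('₹').replace(',', ''))
print((mrp - price) / mrp * 100)  # 25.0
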
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a__:
def __init__( self : Tuple ):
a : Optional[int] = ''
a : Optional[Any] = ''
a : str = []
a : int = 0
a : str = 2_56
a : Union[str, Any] = 0
a : Any = 0
a : Optional[int] = 0
a : List[str] = 0
def lowercase_ ( self : str , __snake_case : str ):
a : Any = cva.imread(__snake_case , 0 )
a : Optional[Any] = copy.deepcopy(self.img )
a , a , a : int = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
a : Optional[int] = np.sum(__snake_case )
for i in range(len(__snake_case ) ):
a : Optional[Any] = x[i] / self.k
self.sk += prk
a : str = (self.L - 1) * self.sk
if self.rem != 0:
                a : Optional[int] = last % 1  # keep the fractional part of `last` so it can be rounded below
a : int = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__snake_case )
a : str = int(np.ma.count(self.img ) / self.img[1].size )
a : Optional[int] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
a : Any = self.img[j][i]
if num != self.last_list[num]:
a : str = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def lowercase_ ( self : Dict ):
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def lowercase_ ( self : List[Any] ):
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
lowerCAmelCase: Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
    stretcher.show_image()
| 297 | 0 |
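
# A compact sketch of the mapping the class above builds: the cumulative
# histogram of the input image is scaled to the intensity range L - 1 and
# rounded, yielding one output level per input level (histogram equalization).
import numpy as np

hist = np.array([4, 0, 2, 2], dtype=float)  # toy histogram over 4 grey levels
L = 4
sk = np.cumsum(hist / hist.sum())           # cumulative distribution function
mapping = np.round((L - 1) * sk).astype(int)
print(mapping)  # [2 2 2 3] -> every pixel of level i becomes mapping[i]
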
def add( first : int , second : int ):
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second   # bits that generate a carry
        first ^= second      # sum without the carry
        second = c << 1      # shift the carry one position left
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Optional[int] = int(input("Enter the first number: ").strip())
A : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
| 184 |
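
# A short trace of the carry loop in `add` above for add(5, 3): AND extracts
# the carry bits, XOR adds without carrying, and the carry is shifted left
# until it disappears.
first, second = 5, 3  # 0b101, 0b011
step = 0
while second != 0:
    first, second = first ^ second, (first & second) << 1
    step += 1
    print(step, bin(first), bin(second))
print(first)  # 8
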
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
if isinstance(__snake_case , __snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : str = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
a : Optional[Any] = json.load(__snake_case )
else:
try:
a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
a : Union[str, Any] = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
a : List[str] = config
self.set_stage_and_offload()
def lowercase_ ( self : List[str] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
a : Dict = self.get_value('zero_optimization.stage' , -1 )
# offload
a : str = False
if self.is_zeroa() or self.is_zeroa():
a : Union[str, Any] = set(['cpu', 'nvme'] )
a : Optional[Any] = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
a : List[str] = True
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a : str = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
a : Dict = nodes.pop()
for node in nodes:
a : List[Any] = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
a , a : List[Any] = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
a : Optional[Any] = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
for node in nodes:
a : str = config
a : Dict = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
a : Union[str, Any] = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def lowercase_ ( self : Optional[Any] ):
return self._stage == 2
def lowercase_ ( self : Union[str, Any] ):
return self._stage == 3
def lowercase_ ( self : str ):
return self._offload
class a__:
def __init__( self : Tuple , __snake_case : str ):
a : Optional[Any] = engine
def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ):
# runs backpropagation and handles mixed precision
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : List[str] ):
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
a : Optional[Any] = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self : Dict , __snake_case : Dict=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self : Tuple ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a__( lowerCamelCase__ ):
def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
super().__init__(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
a : Optional[Any] = params
a : str = lr
a : List[str] = weight_decay
a : str = kwargs
class a__:
def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
a : Union[str, Any] = optimizer
a : Any = total_num_steps
a : List[str] = warmup_num_steps
        a : int = kwargs
| 297 | 0 |
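
# A minimal standalone sketch of the dotted-key lookup that `find_config_node`
# and `get_value` above implement: split the key on '.', walk the nested dict,
# and fall back to a default when any segment is missing. `get_value` here is
# a reimplementation for illustration, not the class method itself.
ds_config = {
    'zero_optimization': {'stage': 3, 'offload_param': {'device': 'cpu'}},
}

def get_value(config, ds_key_long, default=None):
    node = config
    *parents, leaf = ds_key_long.split('.')
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)

print(get_value(ds_config, 'zero_optimization.stage'))                 # 3
print(get_value(ds_config, 'zero_optimization.offload_param.device'))  # cpu
print(get_value(ds_config, 'zero_optimization.offload_optimizer.device', 'none'))  # none
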
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ : List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=1 ):
SCREAMING_SNAKE_CASE_ = tokenizer
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = len(__snake_case ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ = n_copies
def __iter__( self : str ):
SCREAMING_SNAKE_CASE_ = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
SCREAMING_SNAKE_CASE_ = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = start_length
SCREAMING_SNAKE_CASE_ = eof_strings
SCREAMING_SNAKE_CASE_ = tokenizer
def __call__( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE_ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=20 , **__UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = batch['ids'].shape[-1]
SCREAMING_SNAKE_CASE_ = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ = batch['task_id'].repeat(_A )
SCREAMING_SNAKE_CASE_ = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
SCREAMING_SNAKE_CASE_ = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def UpperCAmelCase_ ( ) -> int:
# Setup configuration
SCREAMING_SNAKE_CASE_ = HfArgumentParser(_A )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ = 'false'
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ = load_dataset('openai_humaneval' )
SCREAMING_SNAKE_CASE_ = load_metric('code_eval' )
SCREAMING_SNAKE_CASE_ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
SCREAMING_SNAKE_CASE_ = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
SCREAMING_SNAKE_CASE_ = accelerator.prepare(_A , _A )
SCREAMING_SNAKE_CASE_ = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ = []
for task in tqdm(range(_A ) ):
SCREAMING_SNAKE_CASE_ = human_eval['test'][task]['test']
SCREAMING_SNAKE_CASE_ = f"check({human_eval['test'][task]['entry_point']})"
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 225 |
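
# A worked example of the `remove_last_block` truncation above: the generation
# is split on the EOF marker strings and everything from the last marker on is
# dropped, keeping only the completed function body.
import re

EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
generation = '    return a + b\n\nprint(add(1, 2))'
parts = re.split('(%s)' % '|'.join(EOF_STRINGS), generation)
print(repr(''.join(parts[:-2])))  # '    return a + b\n'
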
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase: int = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class a__( unittest.TestCase ):
def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
a : Optional[int] = None
a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
a : List[str] = os.path.abspath('examples' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
a : int = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
a : List[Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
a : Union[str, Any] = '\n'.join(__snake_case )
if special_strings is not None:
for string in special_strings:
a : Union[str, Any] = diff.replace(__snake_case , '' )
self.assertEqual(__snake_case , '' )
def lowercase_ ( self : Optional[Any] ):
self.one_complete_example('complete_nlp_example.py' , __snake_case )
self.one_complete_example('complete_nlp_example.py' , __snake_case )
def lowercase_ ( self : Any ):
a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
a : int = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class a__( lowerCamelCase__ ):
lowercase__ = False
@classmethod
def lowercase_ ( cls : Optional[int] ):
super().setUpClass()
a : List[str] = tempfile.mkdtemp()
a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowercase_ ( cls : Optional[int] ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowercase_ ( self : Tuple ):
a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def lowercase_ ( self : Dict ):
a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
a : int = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def lowercase_ ( self : Any ):
a : Tuple = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
a : Any = torch.cuda.device_count()
else:
a : str = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
else:
self.assertIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
@slow
def lowercase_ ( self : Tuple ):
a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case )
a : Optional[Any] = re.findall('({.+})' , __snake_case )
a : str = [r for r in results if 'accuracy' in r][-1]
a : str = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def lowercase_ ( self : Optional[int] ):
a : int = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
a : Optional[Any] = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) )
def lowercase_ ( self : List[str] ):
a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def lowercase_ ( self : int ):
a : Optional[Any] = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
| 297 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class lowercase ( lowerCamelCase__ ):
lowercase_ : Optional[int] =field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowercase_ : List[str] =Features({'''audio''': Audio()} )
lowercase_ : str =Features({'''labels''': ClassLabel} )
lowercase_ : List[Any] ='''audio'''
lowercase_ : List[Any] ='''labels'''
def A__ ( self ,A__):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.')
if not isinstance(features[self.label_column] ,__snake_case):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
lowercase = copy.deepcopy(self)
lowercase = self.label_schema.copy()
lowercase = features[self.label_column]
lowercase = label_schema
return task_template
@property
def A__ ( self):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 101 |
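
# A minimal sketch of aligning the task template above with a dataset's own
# column names; the class defined above is datasets' AudioClassification task
# template under renamed identifiers, and the 'sound' / 'genre' columns are
# made up for illustration. Assumes a `datasets` version that still ships
# `datasets.tasks`.
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({'sound': Audio(), 'genre': ClassLabel(names=['rock', 'jazz'])})
template = AudioClassification(audio_column='sound', label_column='genre')
aligned = template.align_with_features(features)
print(aligned.column_mapping)  # {'sound': 'audio', 'genre': 'labels'}
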
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
a : Union[str, Any] = tokenizer
a : Union[str, Any] = dataset
a : Any = len(__snake_case ) if n_tasks is None else n_tasks
a : List[str] = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
a : Dict = start_length
a : Dict = eof_strings
a : str = tokenizer
def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def lowerCamelCase__ ( _A ):
a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
a : Optional[Any] = batch['ids'].shape[-1]
a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
a : Tuple = batch['task_id'].repeat(_A )
a : List[Any] = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
a : List[str] = generated_tokens.cpu().numpy()
a : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
a : Any = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def lowerCamelCase__ ( ):
# Setup configuration
a : Dict = HfArgumentParser(_A )
a : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
a , a : int = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
a , a : Tuple = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 297 | 0 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = 'https://openaipublic.azureedge.net/jukebox/models/'
lowerCAmelCase__ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
UpperCamelCase = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
UpperCamelCase = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
UpperCamelCase = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
UpperCamelCase = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
UpperCamelCase = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
UpperCamelCase = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
UpperCamelCase = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
UpperCamelCase = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
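# Worked example (added for clarity): replace_key("prior.x_out.weight")
# returns "prior.fc_proj_out.weight", and a key such as
# "bottleneck.level_blocks.0.k" ends with "k", so it is rewritten to
# "bottleneck.level_blocks.0.codebook".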
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
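# Worked example (added for clarity): for the checkpoint key
# "encoders.0.level_blocks.0.model.2.1.bias", re_encoder_block_conv_in
# matches with groups ('0', '0', '2', '1', 'bias'), so block_index is
# 2 * 2 + 1 = 5 and the key is rewritten to
# "encoders.0.level_blocks.0.downsample_block.5.bias".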
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 153 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp( t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
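# Worked example (added for clarity): for orthogonal unit vectors
# v0 = [1, 0] and v1 = [0, 1], dot = 0 and theta_0 = pi / 2, so at t = 0.5
# both weights equal sin(pi / 4) / sin(pi / 2) ~= 0.7071 and slerp returns
# ~[0.7071, 0.7071] -- the midpoint on the unit circle, rather than the
# chord midpoint [0.5, 0.5] that a plain linear interpolation would give.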
def spherical_dist_loss( x , y ):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad( model , value ):
    for param in model.parameters():
        param.requires_grad = value
class a__( DiffusionPipeline ):
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , clip_model : CLIPModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , feature_extractor : CLIPFeatureExtractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ):
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size['shortest_edge']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    def freeze_vae( self ):
        set_requires_grad(self.vae , False )
    def unfreeze_vae( self ):
        set_requires_grad(self.vae , True )
    def freeze_unet( self ):
        set_requires_grad(self.unet , False )
    def unfreeze_unet( self ):
        set_requires_grad(self.unet , True )
    def get_timesteps( self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        if not isinstance(image , torch.Tensor ):
            raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(image )}""" )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def get_image_description( self , image ):
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
    def get_clip_image_embeddings( self , image , batch_size ):
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn( self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents , timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss , latents )[0]
        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__( self , content_image : Union[torch.FloatTensor, PIL.Image.Image] , style_image : Union[torch.FloatTensor, PIL.Image.Image] , content_prompt : Optional[str] = None , style_prompt : Optional[str] = None , height : Optional[int] = 512 , width : Optional[int] = 512 , noise_strength : float = 0.6 , num_inference_steps : Optional[int] = 50 , guidance_scale : Optional[float] = 7.5 , batch_size : Optional[int] = 1 , eta : float = 0.0 , clip_guidance_scale : Optional[float] = 100 , generator : Optional[torch.Generator] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , slerp_latent_style_strength : float = 0.8 , slerp_prompt_style_strength : float = 0.1 , slerp_clip_image_style_strength : float = 0.1 , ):
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
a : Dict = [generator] + [None] * (batch_size - 1)
a : Any = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
a : List[str] = [x[0] for x in coca_is_none if x[1]]
a : List[str] = ', '.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : int = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : Union[str, Any] = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
a : Optional[Any] = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a : Dict = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a : Any = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a : Any = {}
if accepts_offset:
a : Optional[Any] = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
a : Optional[int] = timesteps[:1].repeat(__snake_case )
# Preprocess image
a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
a : List[Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : str = preprocess(__snake_case , __snake_case , __snake_case )
a : Union[str, Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : List[str] = slerp(
__snake_case , __snake_case , __snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a : Any = content_text_input.input_ids.shape[-1]
a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a : Union[str, Any] = {}
if accepts_eta:
a : List[str] = eta
# check if the scheduler accepts generator
a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a : Any = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a : List[str] = noise_pred.chunk(2 )
a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a : Union[str, Any] = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
a : Tuple = 1 / 0.18215 * latents
a : Optional[int] = self.vae.decode(__snake_case ).sample
a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a : str = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case ) | 297 | 0 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class UpperCAmelCase_ ( TestCase ):
    def _create_example_records( self ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset( self ):
        data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        return Dataset.from_dict(data )
    def test_create( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['col_1', 'col_2'] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self ):  # checks what happens with missing columns
        uneven_records = [{'col_1': 1}, {'col_2': 'x'}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {'col_1': 1} )
        self.assertDictEqual(dset[1] , {'col_1': None} )  # NB: first record is used for columns
    def test_variable_list_records( self ):  # checks if the type can be inferred from the second record
        list_records = [{'col_1': []}, {'col_1': [1, 2]}]
        dset = Dataset.from_list(list_records )
        self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64' ) ) )
    def test_create_empty( self ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 4 |
'''simple docstring'''
def hubble_parameter( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
a : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
a : Union[str, Any] = 1 - (matter_density + radiation_density + dark_energy)
a : Union[str, Any] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
a : int = hubble_constant * e_a ** (1 / 2)
return hubble
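# Worked example (added for clarity): for a flat universe with
# H0 = 68.3, Omega_r = 1e-4, Omega_m = 0.3 and
# Omega_Lambda = 1 - Omega_r - Omega_m, the curvature term vanishes, so
# at redshift z = 0 we get E(0) = Omega_r + Omega_m + Omega_Lambda = 1
# and H(0) = H0 * sqrt(1) = 68.3, which the demo below closely approximates.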
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 297 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class A_ ( Protocol ):
    def process( self , sample : float ) -> float:
        return 0.0
def get_bounds( fft_results , samplerate ) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type , samplerate ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('Gain (dB)' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type , samplerate ) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('Phase shift (Radians)' )
    plt.plot(np.unwrap(phase , -2 * pi ) )
    plt.show()
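# --- illustrative sketch, not part of the original module ---
# A trivial filter satisfying the protocol above: the identity filter,
# whose frequency response is flat at 0 dB and whose phase shift is zero.
class IdentityFilter:
    def process( self , sample: float ) -> float:
        return sample
# Usage (the 48 kHz samplerate is an arbitrary choice):
#   show_frequency_response(IdentityFilter() , 48_000 )
#   show_phase_response(IdentityFilter() , 48_000 )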
| 73 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__( self , size : int ):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )]  # flag for lazy update
    def left( self , idx : int ):
        return idx * 2
    def right( self , idx : int ):
        return idx * 2 + 1
    def build( self , idx : int , left_element : int , right_element : int , a : list[int] ):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx : int , left_element : int , right_element : int , a : int , b : int , val : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx : int , left_element : int , right_element : int , a : int , b : int ):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ):
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
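# Usage note (added for clarity): update(1, 1, size, a, b, val) lazily
# assigns `val` to every element of the 1-indexed range [a, b], and
# query(1, 1, size, a, b) returns the maximum over [a, b]; with lazy
# propagation each call costs O(log size).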
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt) | 297 | 0 |
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome( s: str ) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal( s: str ) -> bool:
    '''simple docstring'''
    end = len(s ) // 2
    n = len(s )
    # We only need to traverse the first half of the string, since the
    # i-th character from the start can be compared against the i-th
    # character from the end, which sits at index n - i - 1.
    # e.g. for [0, 1, 2, 3, 4, 5], index 4 is the mirror of index 1 (i == n - i - 1)
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive( s: str ) -> bool:
    '''simple docstring'''
    if len(s ) <= 1:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice( s: str ) -> bool:
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function( name: str ) -> None:
    '''simple docstring'''
    stmt = F"""all({name}(key) is value for key, value in test_data.items())"""
    setup = F"""from __main__ import test_data, {name}"""
    number = 500_000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 296 |
'''simple docstring'''
def add( first: int , second: int ) -> int:
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
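# Example trace (added for clarity): add(5, 3)
#   c = 0b101 & 0b011 = 0b001; first = 0b101 ^ 0b011 = 0b110; second = 0b010
#   c = 0b110 & 0b010 = 0b010; first = 0b110 ^ 0b010 = 0b100; second = 0b100
#   c = 0b100 & 0b100 = 0b100; first = 0b000;                 second = 0b1000
#   c = 0b000 & 0b1000 = 0b0;  first = 0b1000;                second = 0
# -> returns 0b1000 == 8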
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F"{add(first, second) = }") | 297 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 255 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory( keep_in_memory , jsonl_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_json_features( features , jsonl_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def test_dataset_from_json_with_unsorted_column_names( features , jsonl_312_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features( jsonl_312_path , tmp_path ):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split( split , jsonl_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split ).read()
    _check_json_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type( path_type , jsonl_path , tmp_path ):
    if issubclass(path_type , str ):
        path = jsonl_path
    elif issubclass(path_type , list ):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory( keep_in_memory , jsonl_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features( features , jsonl_path , tmp_path ):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits( split , jsonl_path , tmp_path ):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
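# Example (added for clarity): load_json(io.BytesIO(b'{"a": 1}')) returns
# {"a": 1}, while load_json_lines(io.BytesIO(b'{"a": 1}\n{"a": 2}'))
# returns [{"a": 1}, {"a": 2}].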
class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def test_dataset_to_json_invalidproc( self , dataset ):
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression( self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
        path = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
        original_path = str(shared_datadir / F"""test_file.json.{extension}""" )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , 'rb' , compression='infer' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , 'rb' , compression='infer' ) as f:
            original_content = f.read()
        assert exported_content == original_content
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _UpperCamelCase :
"""simple docstring"""
    data: int
    left: _UpperCamelCase | None = None
    right: _UpperCamelCase | None = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCAmelCase ( root ):
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(_A ) != count_coins(_A ):
        raise ValueError('''The number of nodes should be the same as the number of coins''' )
# Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
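# Usage sketch (the tree below is illustrative, not from the original
# module): a root holding 3 coins with two empty children needs exactly
# two moves -- one coin pushed down each edge.
#   root = _UpperCamelCase(3 , _UpperCamelCase(0 ) , _UpperCamelCase(0 ) )
#   UpperCAmelCase(root )  # -> 2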
if __name__ == "__main__":
import doctest
doctest.testmod() | 210 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data( product = "laptop" ) -> DataFrame:
a : Any = f"""https://www.amazon.in/laptop/s?k={product}"""
a : Tuple = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
a : Any = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
a : Optional[int] = item.ha.text
a : str = 'https://www.amazon.in/' + item.ha.a['href']
a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
a : Union[str, Any] = 'Not available'
try:
a : str = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
a : int = ''
try:
a : Union[str, Any] = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
a : Any = float('nan' )
except AttributeError:
pass
a : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
a : Any = ' '
a : List[str] = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv") | 297 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
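# Usage sketch (the environment variable names are illustrative):
#   os.environ["MY_DEBUG_FLAG"] = "yes"
#   parse_flag_from_env("MY_DEBUG_FLAG")             # -> True  (strtobool("yes") == 1)
#   get_int_from_env(["MY_NUM_PROCS", "NPROC"], 8)   # -> 8 when neither variable is set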
| 33 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_projection_dim )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
return components
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
    def test_image_embeds_none( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs.update({'image_embeds': None} )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Optional[int] = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
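# Editor's sketch: what the _LazyModule indirection above buys -- the heavy
# torch-backed submodule is only imported on first attribute access, so the
# top-level package import stays cheap. The timing harness below is
# illustrative only; the module path is an assumption and absolute numbers
# depend on the environment.
import importlib
import time

t0 = time.perf_counter()
vit_msn = importlib.import_module("transformers.models.vit_msn")  # lazy package import
print(f"package import took {time.perf_counter() - t0:.4f}s")      # fast: nothing resolved yet

t0 = time.perf_counter()
_ = vit_msn.ViTMSNForImageClassification                           # triggers the real import
print(f"first attribute access took {time.perf_counter() - t0:.4f}s")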
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase: List[str] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """t5"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ):
a : Optional[int] = vocab_size
a : Dict = d_model
a : Union[str, Any] = d_kv
a : Dict = d_ff
a : Tuple = num_layers
a : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a : int = num_heads
a : str = relative_attention_num_buckets
a : List[Any] = relative_attention_max_distance
a : int = dropout_rate
a : Tuple = layer_norm_epsilon
a : str = initializer_factor
a : List[Any] = feed_forward_proj
a : Union[str, Any] = use_cache
a : List[str] = self.feed_forward_proj.split('-' )
a : int = act_info[-1]
a : Union[str, Any] = act_info[0] == 'gated'
if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a : Optional[Any] = 'gelu_new'
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , )
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : Optional[int] ):
a : Dict = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
a : Dict = 'past_encoder_sequence + sequence'
a : Dict = {0: 'batch'}
a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
a : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction='inputs' )
return common_inputs
@property
def lowercase_ ( self : List[Any] ):
return 13
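# Editor's sketch: the `feed_forward_proj` parsing performed in the config
# __init__ above, replayed standalone. A "gated-" prefix selects a gated
# feed-forward block, and "gated-gelu" is additionally remapped to the
# "gelu_new" activation for backward compatibility. Illustrative only.
for feed_forward_proj in ("relu", "gated-gelu"):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":  # backwards-compatibility remap
        dense_act_fn = "gelu_new"
    print(f"{feed_forward_proj!r} -> act={dense_act_fn!r}, gated={is_gated_act}")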
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A : str = get_logger(__name__)
A : Any = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _lowercase :
"""simple docstring"""
@add_start_docstrings(__snake_case )
def __call__( self : List[str] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _lowercase :
"""simple docstring"""
@add_start_docstrings(__snake_case )
def __call__( self : str , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray ):
'''simple docstring'''
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
@add_start_docstrings(__snake_case )
def __call__( self : List[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
for processor in self:
lowerCamelCase__ : List[str] = inspect.signature(processor.__call__ ).parameters
if len(__snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"Make sure that all the required parameters: {list(function_args.keys() )} for "
f"{processor.__class__} are passed to the logits processor." )
lowerCamelCase__ : Optional[Any] = processor(__snake_case , __snake_case , __snake_case , **__snake_case )
else:
lowerCamelCase__ : Tuple = processor(__snake_case , __snake_case , __snake_case )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : float ):
'''simple docstring'''
if not isinstance(__snake_case , __snake_case ) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}" )
lowerCamelCase__ : Dict = temperature
def __call__( self : Optional[int] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = scores / self.temperature
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : float , __lowerCamelCase : float = -float("Inf" ) , __lowerCamelCase : int = 1 ):
'''simple docstring'''
if not isinstance(__snake_case , __snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}" )
if not isinstance(__snake_case , __snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}" )
lowerCamelCase__ : List[str] = top_p
lowerCamelCase__ : Optional[int] = filter_value
lowerCamelCase__ : Optional[int] = min_tokens_to_keep
def __call__( self : Tuple , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : List[str] = lax.top_k(__snake_case , scores.shape[-1] )
lowerCamelCase__ : str = jnp.full_like(__snake_case , self.filter_value )
lowerCamelCase__ : Dict = jax.nn.softmax(__snake_case , axis=-1 ).cumsum(axis=-1 )
lowerCamelCase__ : List[Any] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCamelCase__ : Dict = jnp.roll(__snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(__snake_case )
# min tokens to keep
lowerCamelCase__ : Optional[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(__snake_case )
lowerCamelCase__ : List[Any] = jnp.where(__snake_case , __snake_case , __snake_case )
lowerCamelCase__ : Optional[int] = jax.lax.sort_key_val(__snake_case , __snake_case )[-1]
return next_scores
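# Editor's sketch: the nucleus (top-p) cutoff above, replayed with plain numpy
# so the cumulative-probability logic is easy to follow (the class itself uses
# lax.top_k / jnp.roll on sorted scores). Numbers are made up; the logits are
# already sorted descending here.
import numpy as np

logits = np.array([2.0, 1.0, 0.5, -1.0])
probs = np.exp(logits) / np.exp(logits).sum()   # softmax ~= [0.61, 0.22, 0.14, 0.03]
keep = np.cumsum(probs) < 0.8                   # tokens strictly inside the nucleus
keep = np.roll(keep, 1)                         # shift so the crossing token is kept too
keep[0] = True                                  # always keep at least the best token
print(np.where(keep, logits, -np.inf))          # -> [  2.   1. -inf -inf]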
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : int , __lowerCamelCase : int , __lowerCamelCase : float = -float("Inf" ) , __lowerCamelCase : int = 1 ):
'''simple docstring'''
if not isinstance(__snake_case , __snake_case ) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}" )
lowerCamelCase__ : Tuple = max(__snake_case , __snake_case )
lowerCamelCase__ : Any = filter_value
def __call__( self : str , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = scores.shape
lowerCamelCase__ : int = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCamelCase__ : List[Any] = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCamelCase__ : Optional[Any] = lax.top_k(__snake_case , __snake_case )
lowerCamelCase__ : Optional[Any] = jnp.broadcast_to((jnp.arange(__snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCamelCase__ : int = topk_scores.flatten()
lowerCamelCase__ : Union[str, Any] = topk_indices.flatten() + shift
lowerCamelCase__ : List[str] = next_scores_flat.at[topk_indices_flat].set(__snake_case )
lowerCamelCase__ : Dict = next_scores_flat.reshape(__snake_case , __snake_case )
return next_scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Any = bos_token_id
def __call__( self : Optional[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
lowerCamelCase__ : List[Any] = jnp.where(__snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , __snake_case )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = max_length
lowerCamelCase__ : Optional[Any] = eos_token_id
def __call__( self : List[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = jnp.full(scores.shape , -float("inf" ) )
lowerCamelCase__ : Optional[int] = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCamelCase__ : List[str] = jnp.where(__snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , __snake_case )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(__snake_case , __snake_case ) or min_length < 0:
raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}" )
if not isinstance(__snake_case , __snake_case ) or eos_token_id < 0:
raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}" )
lowerCamelCase__ : str = min_length
lowerCamelCase__ : Dict = eos_token_id
def __call__( self : Optional[int] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Dict = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCamelCase__ : str = jnp.where(__snake_case , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , __snake_case )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Dict = list(__snake_case )
lowerCamelCase__ : str = begin_index
def __call__( self : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCamelCase__ : Optional[Any] = jnp.where(__snake_case , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , __snake_case )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : list ):
'''simple docstring'''
lowerCamelCase__ : Tuple = list(__snake_case )
def __call__( self : Optional[Any] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Any ):
'''simple docstring'''
lowerCamelCase__ : int = dict(__snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCamelCase__ : Optional[int] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCamelCase__ : str = force_token_array.at[index].set(__snake_case )
lowerCamelCase__ : List[str] = jnp.intaa(__snake_case )
def __call__( self : List[str] , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int ):
'''simple docstring'''
def _force_token(__lowerCamelCase : str ):
lowerCamelCase__ : int = scores.shape[0]
lowerCamelCase__ : str = self.force_token_array[generation_idx]
lowerCamelCase__ : int = jnp.ones_like(__snake_case , dtype=scores.dtype ) * -float("inf" )
lowerCamelCase__ : str = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowerCamelCase__ : List[Any] = lax.dynamic_update_slice(__snake_case , __snake_case , (0, current_token) )
return new_scores
lowerCamelCase__ : List[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(__snake_case ) , lambda: scores , ) , )
return scores
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = generate_config.eos_token_id
lowerCamelCase__ : Union[str, Any] = generate_config.no_timestamps_token_id
lowerCamelCase__ : Any = generate_config.no_timestamps_token_id + 1
lowerCamelCase__ : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__snake_case , "max_initial_timestamp_index" ):
lowerCamelCase__ : Any = generate_config.max_initial_timestamp_index
else:
lowerCamelCase__ : Optional[int] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCamelCase__ : Optional[Any] = model_config.vocab_size
def __call__( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : Dict = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ):
lowerCamelCase__ : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , __snake_case , __snake_case )
lowerCamelCase__ : Union[str, Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __snake_case , )
lowerCamelCase__ : Tuple = jnp.where((cur_len - self.begin_index) < 2 , __snake_case , __snake_case )
lowerCamelCase__ : Union[str, Any] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __snake_case , __snake_case , )
return jnp.where(
__snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , __snake_case , )
lowerCamelCase__ : Optional[int] = jax.vmap(__snake_case )(__snake_case , __snake_case )
lowerCamelCase__ : Any = jnp.where(cur_len == self.begin_index , __snake_case , __snake_case )
lowerCamelCase__ : Optional[int] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __snake_case , )
lowerCamelCase__ : Union[str, Any] = self.timestamp_begin + self.max_initial_timestamp_index
lowerCamelCase__ : Any = jnp.where(
__snake_case , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , __snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowerCamelCase__ : Optional[int] = jax.nn.log_softmax(__snake_case , axis=-1 )
def handle_cumulative_probs(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
lowerCamelCase__ : Optional[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowerCamelCase__ : List[str] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , __snake_case , )
lowerCamelCase__ : str = jax.vmap(__snake_case )(__snake_case , __snake_case )
return scores
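# Editor's sketch: composing warpers like the ones defined above. The class
# names in this dump are mangled, so this example uses the upstream
# transformers names they correspond to (FlaxLogitsProcessorList,
# FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper) -- treat those as
# assumptions if you adapt it. Requires jax and transformers.
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
scores = jnp.log(jnp.array([[0.05, 0.6, 0.25, 0.1]]))  # (batch=1, vocab=4) log-probs
warpers = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
)
print(warpers(input_ids, scores, cur_len=1))  # everything outside the top 2 is -inf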
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase__ ( inductance , capacitance ):
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
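# Editor's worked example of the formula above: f0 = 1 / (2*pi*sqrt(L*C)).
# With L = 1 mH and C = 1 uF, sqrt(1e-3 * 1e-6) = sqrt(1e-9) ~ 3.1623e-5, so
# f0 ~ 1 / (2*pi*3.1623e-5) ~ 5033 Hz. Standalone call, illustrative values.
label, frequency = lowerCamelCase__(1e-3, 1e-6)
print(label, f"{frequency:.1f} Hz")  # -> Resonant frequency 5032.9 Hz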
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ : str = 'RegNetConfig'
# Base docstring
lowerCamelCase__ : str = 'facebook/regnet-y-040'
lowerCamelCase__ : Optional[int] = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ : Union[str, Any] = 'facebook/regnet-y-040'
lowerCamelCase__ : Any = 'tabby, tabby cat'
lowerCamelCase__ : Union[str, Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Optional[int] , ):
super().__init__(**__snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=__snake_case , strides=__snake_case , padding='VALID' , groups=__snake_case , use_bias=__snake_case , name='convolution' , )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
SCREAMING_SNAKE_CASE_ = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = self.convolution(self.padding(__snake_case ) )
SCREAMING_SNAKE_CASE_ = self.normalization(__snake_case )
SCREAMING_SNAKE_CASE_ = self.activation(__snake_case )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : Dict ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = config.num_channels
SCREAMING_SNAKE_CASE_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = shape_list(__snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE_ = tf.transpose(__snake_case , perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.embedder(__snake_case )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[int] ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=__snake_case , kernel_size=1 , strides=__snake_case , use_bias=__snake_case , name='convolution' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ):
return self.normalization(self.convolution(__snake_case ) , training=__snake_case )
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : List[str] ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name='pooler' )
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=__snake_case , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
SCREAMING_SNAKE_CASE_ = self.pooler(__snake_case )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE_ = layer_module(__snake_case )
SCREAMING_SNAKE_CASE_ = hidden_state * pooled
return hidden_state
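# Editor's sketch: the squeeze-and-excitation pattern the layer above
# implements -- global-average "squeeze", two 1x1 convs as the "excitation"
# bottleneck, then a channel-wise rescale. Plain TF ops, NHWC layout; shapes
# are illustrative.
import tensorflow as tf

x = tf.random.normal((2, 7, 7, 32))                               # [batch, h, w, channels]
pooled = tf.reduce_mean(x, axis=[1, 2], keepdims=True)            # squeeze -> (2, 1, 1, 32)
attn = tf.keras.layers.Conv2D(8, 1, activation="relu")(pooled)    # bottleneck (reduced channels)
attn = tf.keras.layers.Conv2D(32, 1, activation="sigmoid")(attn)  # per-channel gate in [0, 1]
print((x * attn).shape)                                           # (2, 7, 7, 32), rescaled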
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : str ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name='layer.2' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(__snake_case )
SCREAMING_SNAKE_CASE_ = self.shortcut(__snake_case )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(__snake_case )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : str ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(__snake_case , stride=__snake_case , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
__snake_case , stride=__snake_case , groups=__snake_case , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(__snake_case , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(__snake_case , kernel_size=1 , activation=__snake_case , name='layer.3' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(__snake_case )
SCREAMING_SNAKE_CASE_ = self.shortcut(__snake_case )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(__snake_case )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : List[Any] ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ = [
# downsampling is done in the first layer with stride of 2
layer(__snake_case , __snake_case , __snake_case , stride=__snake_case , name='layers.0' ),
*[layer(__snake_case , __snake_case , __snake_case , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : int ):
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(__snake_case )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : Tuple ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__snake_case , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
SCREAMING_SNAKE_CASE_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__snake_case , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__snake_case , __snake_case , __snake_case , depth=__snake_case , name=F"stages.{i+1}" ) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ):
SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ = stage_module(__snake_case )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__snake_case , hidden_states=__snake_case )
@keras_serializable
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
lowercase_ = RegNetConfig
def __init__( self : Optional[Any] , _lowerCAmelCase : Tuple , **_lowerCAmelCase : List[Any] ):
super().__init__(**__snake_case )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = TFRegNetEmbeddings(__snake_case , name='embedder' )
SCREAMING_SNAKE_CASE_ = TFRegNetEncoder(__snake_case , name='encoder' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__snake_case , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.embedder(__snake_case , training=__snake_case )
SCREAMING_SNAKE_CASE_ = self.encoder(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ = self.pooler(__snake_case )
# Change to NCHW output format have uniformity in the modules
SCREAMING_SNAKE_CASE_ = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ = tf.transpose(__snake_case , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = tuple([tf.transpose(__snake_case , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__snake_case , pooler_output=__snake_case , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
lowercase_ = RegNetConfig
lowercase_ = "regnet"
lowercase_ = "pixel_values"
@property
def lowerCAmelCase_ ( self : int ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCamelCase__ : List[Any] = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ : Dict = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCamelCase__ , )
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Union[str, Any] ):
super().__init__(__snake_case , *__snake_case , **__snake_case )
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(__snake_case , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : List[str]=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
pixel_values=__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCamelCase__ , )
class lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : int ):
super().__init__(__snake_case , *__snake_case , **__snake_case )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(__snake_case , name='regnet' )
# classification head
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : str=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
__snake_case , output_hidden_states=__snake_case , return_dict=__snake_case , training=__snake_case )
SCREAMING_SNAKE_CASE_ = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ = self.classifier[0](__snake_case )
SCREAMING_SNAKE_CASE_ = self.classifier[1](__snake_case )
SCREAMING_SNAKE_CASE_ = None if labels is None else self.hf_compute_loss(labels=__snake_case , logits=__snake_case )
if not return_dict:
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__snake_case , logits=__snake_case , hidden_states=outputs.hidden_states )
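# Editor's sketch: exercising a tiny randomly-initialized RegNet classifier end
# to end. The class names in this dump are mangled, so the upstream names
# (RegNetConfig, TFRegNetForImageClassification) are assumed here. Requires
# tensorflow and transformers; config values are arbitrary small numbers.
import tensorflow as tf
from transformers import RegNetConfig, TFRegNetForImageClassification

config = RegNetConfig(
    num_channels=3, embedding_size=8, hidden_sizes=[8, 16], depths=[1, 1], num_labels=4
)
model = TFRegNetForImageClassification(config)
pixel_values = tf.random.normal((1, 3, 32, 32))  # NCHW, transposed to NHWC internally
print(model(pixel_values).logits.shape)          # (1, 4)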
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase: str = {
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def lowerCamelCase__ ( _A ):
a : Union[str, Any] = collections.OrderedDict()
with open(_A , 'r' , encoding='utf-8' ) as reader:
a : int = reader.readlines()
for index, token in enumerate(_A ):
a : int = token.rstrip('\n' )
a : List[Any] = index
return vocab
class a__( lowerCamelCase__ ):
def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ):
a : List[Any] = vocab
a : Any = unk_token
a : List[str] = max_input_chars_per_word
def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ):
a : Optional[Any] = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
a : Any = 0
a : Optional[Any] = []
while start < len(__snake_case ):
a : Optional[int] = len(__snake_case )
a : str = None
while start < end:
a : Optional[Any] = ''.join(chars[start:end] )
if substr in self.vocab:
a : List[str] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
a : List[str] = end
return sub_tokens
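# Editor's sketch: the greedy longest-prefix matching that the tokenize method
# above performs (note: raw substring matches, no "##" continuation marks,
# unlike BERT wordpiece), replayed standalone on a toy vocabulary. This sketch
# omits the max_input_chars_per_word guard.
def greedy_wordpiece(token, vocab, unk="<unk>"):
    chars, start, pieces = list(token), 0, []
    while start < len(chars):
        end, cur = len(chars), None
        while start < end:                    # shrink the window until a vocab hit
            candidate = "".join(chars[start:end])
            if candidate in vocab:
                cur = candidate
                break
            end -= 1
        if cur is None:                       # nothing matched: emit <unk>, advance one char
            pieces.append(unk)
            start += 1
        else:
            pieces.append(cur)
            start = end
    return pieces

print(greedy_wordpiece("unbelievable", {"un", "believ", "able"}))  # ['un', 'believ', 'able']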
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = False
def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
a : Union[str, Any] = bod_token
a : Any = eod_token
a : List[str] = load_vocab(__snake_case )
a : Optional[int] = self.encoder[space_token]
a : str = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
a : Tuple = {v: k for k, v in self.encoder.items()}
a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase_ ( self : Optional[int] ):
return self.encoder[self.bod_token]
@property
def lowercase_ ( self : Dict ):
return self.encoder[self.eod_token]
@property
def lowercase_ ( self : Any ):
return self.encoder["\n"]
@property
def lowercase_ ( self : Tuple ):
return len(self.encoder )
def lowercase_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ):
a : List[str] = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ):
a : Optional[int] = [i for i in token_ids if i >= 0]
a : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : int ):
return token in self.encoder
def lowercase_ ( self : int , __snake_case : List[str] ):
return "".join(__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
return self.decoder.get(__snake_case , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ):
if os.path.isdir(__snake_case ):
a : Optional[int] = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
a : Any = 0
if " " in self.encoder:
a : Union[str, Any] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
a : Tuple = self.encoder['\n']
del self.encoder["\n"]
a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
a : List[Any] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
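# Editor's sketch: what build_inputs_with_special_tokens and
# get_special_tokens_mask above produce for a sequence pair -- CPM-Ant
# prepends the bos token before each segment and appends no eos. Token ids
# below are made up.
bos_token_id = 6
seq_a, seq_b = [11, 12], [21, 22, 23]
inputs = [bos_token_id] + seq_a + [bos_token_id] + seq_b
mask = [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b)
print(inputs)  # [6, 11, 12, 6, 21, 22, 23]
print(mask)    # [1, 0, 0, 1, 0, 0, 0]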
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
lowercase__ :Union[str, Any] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase ( lowerCamelCase__ ):
lowercase_ : Any ='''esm'''
def __init__( self ,A__=None ,A__=None ,A__=None ,A__=7_6_8 ,A__=1_2 ,A__=1_2 ,A__=3_0_7_2 ,A__=0.1 ,A__=0.1 ,A__=1_0_2_6 ,A__=0.02 ,A__=1E-12 ,A__="absolute" ,A__=True ,A__=None ,A__=False ,A__=False ,A__=None ,A__=None ,**A__ ,):
super().__init__(pad_token_id=__snake_case ,mask_token_id=__snake_case ,**__snake_case)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = emb_layer_norm_before
lowercase = token_dropout
lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''')
lowercase = EsmFoldConfig()
elif isinstance(__snake_case ,__snake_case):
lowercase = EsmFoldConfig(**__snake_case)
lowercase = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
lowercase = get_default_vocab_list()
else:
lowercase = vocab_list
else:
lowercase = None
lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,'''use_esm_attn_map''' ,__snake_case):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')
def A__ ( self):
lowercase = super().to_dict()
if isinstance(self.esmfold_config ,__snake_case):
lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase :
lowercase_ : List[str] =None
lowercase_ : List[str] =True
lowercase_ : Tuple =False
lowercase_ : Optional[Any] =False
lowercase_ : int =False
lowercase_ : Any =0
lowercase_ : List[Any] =True
lowercase_ : Optional[int] =False
lowercase_ : Any =128
lowercase_ : Union[str, Any] =None
def A__ ( self):
if self.trunk is None:
lowercase = TrunkConfig()
elif isinstance(self.trunk ,__snake_case):
lowercase = TrunkConfig(**self.trunk)
def A__ ( self):
lowercase = asdict(self)
lowercase = self.trunk.to_dict()
return output
@dataclass
class lowercase :
lowercase_ : Tuple =48
lowercase_ : Optional[Any] =1024
lowercase_ : Any =128
lowercase_ : int =32
lowercase_ : Tuple =32
lowercase_ : Optional[int] =32
lowercase_ : str =0
lowercase_ : List[str] =0
lowercase_ : Any =False
lowercase_ : Dict =4
lowercase_ : List[str] =128
lowercase_ : Dict =None
def A__ ( self):
if self.structure_module is None:
lowercase = StructureModuleConfig()
elif isinstance(self.structure_module ,__snake_case):
lowercase = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f' {self.sequence_state_dim} and {self.sequence_head_width}.')
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
lowercase = self.sequence_state_dim // self.sequence_head_width
lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')
def A__ ( self):
lowercase = asdict(self)
lowercase = self.structure_module.to_dict()
return output
@dataclass
class lowercase :
lowercase_ : Any =384
lowercase_ : Any =128
lowercase_ : Union[str, Any] =16
lowercase_ : Union[str, Any] =128
lowercase_ : Tuple =12
lowercase_ : List[str] =4
lowercase_ : Tuple =8
lowercase_ : Optional[Any] =0.1
lowercase_ : Tuple =8
lowercase_ : Optional[int] =1
lowercase_ : Optional[int] =2
lowercase_ : str =7
lowercase_ : List[Any] =10
lowercase_ : List[str] =1e-8
lowercase_ : Any =1e5
def A__ ( self):
return asdict(self)
def UpperCamelCase ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
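# Editor's sketch: constructing the config above in folding mode, which fills
# in the nested EsmFoldConfig/TrunkConfig defaults and falls back to the
# default vocab list returned by the helper above. The upstream class name
# (EsmConfig) is assumed, since names in this dump are mangled.
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, is_folding_model=True)
print(config.esmfold_config.trunk.num_blocks)  # 48, the TrunkConfig default
print(config.vocab_list[:4])                   # ('<cls>', '<pad>', '<eos>', '<unk>')
print(config.to_dict()["esmfold_config"]["trunk"]["sequence_state_dim"])  # 1024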
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Any = TFAutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : str = AutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) | 297 | 0 |
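# A minimal sketch (not part of the test class above) of the cross-framework
# round trip these tests exercise: `from_pt=True` converts a PyTorch checkpoint
# while loading the TF model, and `from_tf=True` does the reverse. Left as
# comments since running it downloads weights:
# tf_model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)
# pt_model = AutoModel.from_pretrained('bert-base-uncased', from_tf=True)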
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCAmelCase_ = StableDiffusionSAGPipeline
UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ = False
def snake_case_ (self ) -> int:
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCamelCase = CLIPTextModel(__snake_case )
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def snake_case_ (self , __a , __a=0 ) -> int:
if str(__snake_case ).startswith("mps" ):
UpperCamelCase = torch.manual_seed(__snake_case )
else:
UpperCamelCase = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCamelCase = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def snake_case_ (self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
UpperCamelCase = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
UpperCamelCase = '.'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sag_pipe(
[prompt] , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCamelCase = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
UpperCamelCase = '.'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sag_pipe(
[prompt] , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCamelCase = sag_pipe.to(__snake_case )
sag_pipe.set_progress_bar_config(disable=__snake_case )
UpperCamelCase = '.'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=__snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
UpperCamelCase = output.images
assert image.shape == (1, 5_12, 7_68, 3)
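# A minimal self-contained sketch (not part of the test file) of the
# corner-slice check used in the slow tests above: take the 3x3 bottom-right
# patch of the last channel and bound the max absolute deviation from a
# reference slice.
_image = np.zeros((1, 5_12, 5_12, 3))
_slice = _image[0, -3:, -3:, -1]
assert _slice.shape == (3, 3)
assert np.abs(_slice.flatten() - np.zeros(9)).max() < 5e-2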
| 153 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """roberta"""
def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
a : List[str] = vocab_size
a : str = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = hidden_act
a : str = intermediate_size
a : Union[str, Any] = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Optional[int] = type_vocab_size
a : str = initializer_range
a : List[Any] = layer_norm_eps
a : Optional[int] = position_embedding_type
a : Dict = use_cache
a : Any = classifier_dropout
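# A small illustration (not part of the original file) of the dynamic-axes
# mapping the ONNX config below produces in the default (non-multiple-choice)
# case: the batch and sequence axes are left dynamic for every input.
_dynamic_axes_example = OrderedDict(
    [
        ('input_ids', {0: 'batch', 1: 'sequence'}),
        ('attention_mask', {0: 'batch', 1: 'sequence'}),
    ] )
assert _dynamic_axes_example['input_ids'][0] == 'batch'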
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : int ):
if self.task == "multiple-choice":
a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 297 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : int ) -> int:
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCAmelCase = {
'do_resize': True,
'size': {'height': 1_8, 'width': 1_8},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
lowerCAmelCase = os.path.join(self.tmpdirname , __snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__snake_case , __snake_case )
def __UpperCAmelCase ( self : Optional[Any] , **UpperCAmelCase__ : List[str] ) -> Optional[int]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __UpperCAmelCase ( self : List[str] , **UpperCAmelCase__ : List[Any] ) -> Optional[int]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )
def __UpperCAmelCase ( self : Dict ) -> str:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Any ) -> Any:
lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        lowerCAmelCase = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : Any ) -> Any:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
lowerCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__snake_case , return_tensors='np' )
lowerCAmelCase = processor(images=__snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = processor(text=__snake_case )
lowerCAmelCase = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(__snake_case ):
processor()
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(__snake_case )
lowerCAmelCase = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__snake_case , image_processor=__snake_case )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
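# A self-contained sketch (an assumption, not the processor implementation) of
# what the combined call above returns: tokenizer features and image-processor
# features merged into one dict, in that order.
_text_features = {'input_ids': [[1, 2]], 'token_type_ids': [[0, 0]], 'attention_mask': [[1, 1]]}
_image_features = {'pixel_values': np.zeros((1, 3, 1_8, 1_8))}
_merged = {**_text_features, **_image_features}
assert list(_merged.keys()) == ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values']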
| 4 |
'''simple docstring'''
def lowerCamelCase__ ( _A ):
return 10 - x * x
def lowerCamelCase__ ( _A , _A ):
    # Bolzano's theorem: a continuous function with opposite signs at a and b
    # has a root in between, so reject intervals where the signs agree
if equation(_A ) * equation(_A ) >= 0:
raise ValueError('Wrong space!' )
a : Tuple = a
while (b - a) >= 0.01:
# Find middle point
a : Tuple = (a + b) / 2
# Check if middle point is root
if equation(_A ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_A ) * equation(_A ) < 0:
a : List[str] = c
else:
a : Tuple = c
return c
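# A minimal sketch (not part of the original file): the bracket halves each
# iteration, so reaching the tolerance `tol` on [a, b] takes at most
# ceil(log2((b - a) / tol)) steps.
import math

def max_bisection_steps(a, b, tol=0.01):
    # Number of halvings needed before the bracket is narrower than `tol`.
    return math.ceil(math.log2((b - a) / tol))

# e.g. max_bisection_steps(-2, 5) == 10, since 7 / 0.01 = 700 <= 2 ** 10.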
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 297 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = SwinvaConfig()
__lowerCamelCase : Any = swinva_name.split('_' )
__lowerCamelCase : Optional[int] = name_split[1]
if "to" in name_split[3]:
__lowerCamelCase : List[Any] = int(name_split[3][-3:] )
else:
__lowerCamelCase : List[Any] = int(name_split[3] )
if "to" in name_split[2]:
__lowerCamelCase : Optional[Any] = int(name_split[2][-2:] )
else:
__lowerCamelCase : Tuple = int(name_split[2][6:] )
if model_size == "tiny":
__lowerCamelCase : List[str] = 9_6
__lowerCamelCase : int = (2, 2, 6, 2)
__lowerCamelCase : str = (3, 6, 1_2, 2_4)
elif model_size == "small":
__lowerCamelCase : Union[str, Any] = 9_6
__lowerCamelCase : List[Any] = (2, 2, 1_8, 2)
__lowerCamelCase : str = (3, 6, 1_2, 2_4)
elif model_size == "base":
__lowerCamelCase : Any = 1_2_8
__lowerCamelCase : Union[str, Any] = (2, 2, 1_8, 2)
__lowerCamelCase : Optional[Any] = (4, 8, 1_6, 3_2)
else:
__lowerCamelCase : Union[str, Any] = 1_9_2
__lowerCamelCase : List[Any] = (2, 2, 1_8, 2)
__lowerCamelCase : List[Any] = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
__lowerCamelCase : Optional[Any] = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__lowerCamelCase : Any = 2_1_8_4_1
__lowerCamelCase : Tuple = 'huggingface/label-files'
__lowerCamelCase : Optional[int] = 'imagenet-22k-id2label.json'
__lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase : Optional[int] = {int(_A ): v for k, v in idalabel.items()}
__lowerCamelCase : Union[str, Any] = idalabel
__lowerCamelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
__lowerCamelCase : Tuple = 1_0_0_0
__lowerCamelCase : Optional[Any] = 'huggingface/label-files'
__lowerCamelCase : Optional[Any] = 'imagenet-1k-id2label.json'
__lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase : Tuple = {int(_A ): v for k, v in idalabel.items()}
__lowerCamelCase : List[str] = idalabel
__lowerCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__lowerCamelCase : Tuple = img_size
__lowerCamelCase : Tuple = num_classes
__lowerCamelCase : Optional[Any] = embed_dim
__lowerCamelCase : Optional[Any] = depths
__lowerCamelCase : int = num_heads
__lowerCamelCase : Union[str, Any] = window_size
return config
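# A small sketch (an assumption about the naming scheme this parser expects,
# not part of the original script): a name such as 'swinv2_tiny_window8_256'
# decomposes into the fields read above.
_example_split = 'swinv2_tiny_window8_256'.split('_')
assert _example_split[1] == 'tiny'  # model size
assert int(_example_split[2][6:]) == 8  # window size ('window8' -> 8)
assert int(_example_split[3]) == 2_5_6  # input resolution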
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
if "patch_embed.proj" in name:
__lowerCamelCase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowerCamelCase : str = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowerCamelCase : List[Any] = 'encoder.' + name
if "attn.proj" in name:
__lowerCamelCase : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__lowerCamelCase : List[str] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__lowerCamelCase : Union[str, Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__lowerCamelCase : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__lowerCamelCase : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__lowerCamelCase : List[str] = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__lowerCamelCase : List[str] = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__lowerCamelCase : Optional[int] = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__lowerCamelCase : Tuple = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__lowerCamelCase : Optional[Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
__lowerCamelCase : Tuple = 'layernorm.weight'
if name == "norm.bias":
__lowerCamelCase : Dict = 'layernorm.bias'
if "head" in name:
__lowerCamelCase : str = name.replace('head' , 'classifier' )
else:
__lowerCamelCase : List[str] = 'swinv2.' + name
return name
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__lowerCamelCase : int = orig_state_dict.pop(_A )
if "mask" in key:
continue
elif "qkv" in key:
__lowerCamelCase : str = key.split('.' )
__lowerCamelCase : Any = int(key_split[1] )
__lowerCamelCase : Any = int(key_split[3] )
__lowerCamelCase : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCamelCase : List[str] = val[:dim, :]
__lowerCamelCase : str = val[dim : dim * 2, :]
__lowerCamelCase : str = val[-dim:, :]
else:
__lowerCamelCase : str = val[:dim]
                __lowerCamelCase : int = val[dim : dim * 2]
__lowerCamelCase : List[str] = val[-dim:]
else:
__lowerCamelCase : Union[str, Any] = val
return orig_state_dict
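# A self-contained sketch (not part of the conversion script) of the qkv
# slicing convention applied above: a fused (3 * dim, dim) weight splits into
# consecutive query, key and value blocks along dim 0.
_dim = 4
_fused = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)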
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Any = timm.create_model(_A , pretrained=_A )
timm_model.eval()
__lowerCamelCase : str = get_swinva_config(_A )
__lowerCamelCase : Optional[Any] = SwinvaForImageClassification(_A )
model.eval()
__lowerCamelCase : Any = convert_state_dict(timm_model.state_dict() , _A )
model.load_state_dict(_A )
__lowerCamelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
__lowerCamelCase : Any = Image.open(requests.get(_A , stream=_A ).raw )
__lowerCamelCase : Any = image_processor(images=_A , return_tensors='pt' )
__lowerCamelCase : Optional[Any] = timm_model(inputs['pixel_values'] )
__lowerCamelCase : int = model(**_A ).logits
assert torch.allclose(_A , _A , atol=1e-3 )
print(F"Saving model {swinva_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_A )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_A )
model.push_to_hub(
repo_path_or_name=Path(_A , _A ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
a =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a =parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 73 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a__:
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ):
a : Tuple = parent
a : List[str] = batch_size
a : Optional[Any] = seq_length
a : Tuple = is_training
a : Optional[Any] = use_input_mask
a : List[Any] = use_token_type_ids
a : List[Any] = use_labels
a : int = vocab_size
a : Union[str, Any] = hidden_size
a : Any = num_hidden_layers
a : List[str] = num_attention_heads
a : int = intermediate_size
a : str = hidden_act
a : Tuple = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : List[str] = max_position_embeddings
a : Any = type_vocab_size
a : List[str] = type_sequence_label_size
a : Union[str, Any] = initializer_range
a : Optional[int] = num_labels
a : Optional[Any] = num_choices
a : Optional[int] = scope
def lowercase_ ( self : List[Any] ):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Dict = None
if self.use_input_mask:
a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a : Optional[Any] = None
a : Optional[int] = None
a : Dict = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
a : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] ):
a : Any = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ):
a : Tuple = EsmForProteinFolding(config=__snake_case ).float()
model.to(__snake_case )
model.eval()
a : Dict = model(__snake_case , attention_mask=__snake_case )
a : Union[str, Any] = model(__snake_case )
a : List[Any] = model(__snake_case )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase_ ( self : Optional[Any] ):
a : Tuple = self.prepare_config_and_inputs()
        a , a , a , a , a , a = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
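# A small sketch (an assumption about the semantics; shapes taken from the
# assertions above) of the folding-output convention checked in
# `create_and_check_model`: for each of the 8 stacked structure-module outputs,
# positions hold 14 atom coordinates per residue and angles hold 7 torsion
# angles as (sin, cos) pairs.
_positions_shape = (8, 2, 5, 14, 3)  # (iterations, batch, seq_len, atoms, xyz)
_angles_shape = (8, 2, 5, 7, 2)  # (iterations, batch, seq_len, torsions, sin/cos)
assert _positions_shape[3:] == (14, 3) and _angles_shape[3:] == (7, 2)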
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = False
lowercase__ = (EsmForProteinFolding,) if is_torch_available() else ()
lowercase__ = ()
lowercase__ = {} if is_torch_available() else {}
lowercase__ = False
def lowercase_ ( self : int ):
a : Tuple = EsmFoldModelTester(self )
a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowercase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip('Does not support attention outputs' )
def lowercase_ ( self : str ):
pass
@unittest.skip
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold only has one output format.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowercase_ ( self : Tuple ):
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@require_torch
class a__( lowerCamelCase__ ):
@slow
def lowercase_ ( self : Optional[int] ):
a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a : Any = model(__snake_case )['positions']
a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) ) | 297 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
SCREAMING_SNAKE_CASE = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(__snake_case ) ,__snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(transpose(__snake_case ) ,x.transpose() ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
self.assertTrue(np.allclose(transpose(__snake_case ,axes=(1, 2, 0) ) ,x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) ,transpose(__snake_case ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ,axes=(1, 2, 0) ) ,transpose(__snake_case ,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) ,transpose(__snake_case ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ,axes=(1, 2, 0) ) ,transpose(__snake_case ,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ) ,np.asarray(transpose(__snake_case ) ) ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(transpose(__snake_case ,axes=(1, 2, 0) ) ,np.asarray(transpose(__snake_case ,axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(reshape(__snake_case ,(4, 3) ) ,np.reshape(__snake_case ,(4, 3) ) ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
self.assertTrue(np.allclose(reshape(__snake_case ,(12, 5) ) ,np.reshape(__snake_case ,(12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(4, 3) ) ,reshape(__snake_case ,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(12, 5) ) ,reshape(__snake_case ,(12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(4, 3) ) ,reshape(__snake_case ,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(12, 5) ) ,reshape(__snake_case ,(12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(4, 3) ) ,np.asarray(reshape(__snake_case ,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 ,5 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(reshape(__snake_case ,(12, 5) ) ,np.asarray(reshape(__snake_case ,(12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(1 ,3 ,4 )
self.assertTrue(np.allclose(squeeze(__snake_case ) ,np.squeeze(__snake_case ) ) )
SCREAMING_SNAKE_CASE = np.random.randn(1 ,4 ,1 ,5 )
self.assertTrue(np.allclose(squeeze(__snake_case ,axis=2 ) ,np.squeeze(__snake_case ,axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(1 ,3 ,4 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) ,squeeze(__snake_case ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(1 ,4 ,1 ,5 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ,axis=2 ) ,squeeze(__snake_case ,axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(1 ,3 ,4 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) ,squeeze(__snake_case ).numpy() ) )
SCREAMING_SNAKE_CASE = np.random.randn(1 ,4 ,1 ,5 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ,axis=2 ) ,squeeze(__snake_case ,axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(1 ,3 ,4 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ) ,np.asarray(squeeze(__snake_case ) ) ) )
SCREAMING_SNAKE_CASE = np.random.randn(1 ,4 ,1 ,5 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(squeeze(__snake_case ,axis=2 ) ,np.asarray(squeeze(__snake_case ,axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
self.assertTrue(np.allclose(expand_dims(__snake_case ,axis=1 ) ,np.expand_dims(__snake_case ,axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = torch.tensor(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case ,axis=1 ) ,expand_dims(__snake_case ,axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = tf.constant(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case ,axis=1 ) ,expand_dims(__snake_case ,axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.random.randn(3 ,4 )
SCREAMING_SNAKE_CASE = jnp.array(__snake_case )
self.assertTrue(np.allclose(expand_dims(__snake_case ,axis=1 ) ,np.asarray(expand_dims(__snake_case ,axis=1 ) ) ) )
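# A reference sketch (an assumption, not the implementation under test) of the
# `flatten_dict` contract exercised at the top of the class: nested keys are
# joined with '.' into a flat mapping.
def _flatten_dict_sketch(d, parent_key=''):
    items = {}
    for k, v in d.items():
        key = f'{parent_key}.{k}' if parent_key else k
        if isinstance(v, dict):
            items.update(_flatten_dict_sketch(v, key))
        else:
            items[key] = v
    return items

assert _flatten_dict_sketch({'a': {'b': 1}, 'c': 2}) == {'a.b': 1, 'c': 2}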
| 296 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__( nn.Module ):
def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ):
super().__init__()
a : Optional[int] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a : Union[str, Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a : Tuple = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a : Any = [1, 0]
def lowercase_ ( self : str , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Dict=None , __snake_case : bool = True , ):
a : Dict = hidden_states
a : Tuple = []
a : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
a : Union[str, Any] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a : Tuple = self.transformer_index_for_condition[i]
a : Union[str, Any] = self.transformers[transformer_index](
__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case , cross_attention_kwargs=__snake_case , return_dict=__snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
a : Optional[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a : int = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__snake_case ) | 297 | 0 |
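# A standalone sketch (not part of the class above) of the mix-ratio blend in
# `forward`: with mix_ratio = 0.5 the two transformers' residuals are simply
# averaged before the skip connection is added back.
import torch as _torch

_mix_ratio = 0.5
_res_a, _res_b = _torch.ones(2, 3), 3 * _torch.ones(2, 3)
_blended = _res_a * _mix_ratio + _res_b * (1 - _mix_ratio)
assert _torch.allclose(_blended, 2 * _torch.ones(2, 3))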
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_UpperCamelCase: Any = 'docs/source/en/_toctree.yml'
def lowercase__ ( _UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase : Optional[Any] = defaultdict(_A )
lowercase : List[Any] = []
lowercase : List[Any] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(_A )
lowercase : Any = new_doc_list
lowercase : List[str] = [key for key, value in counts.items() if value > 1]
lowercase : List[str] = []
for duplicate_key in duplicates:
lowercase : int = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    lowercase : str = sorted(_A , key=lambda s: s['title'].lower() )
# "overview" gets special treatment and is always first
if len(_A ) > 1:
        raise ValueError(f'{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(_A )
# Sort
return overview_doc
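# A tiny self-contained illustration (mirroring the logic above, not part of
# the original script) of the ordering rule: entries sort case-insensitively by
# title, with any 'Overview' entry pulled to the front.
_sample = [{'local': 'b', 'title': 'Beta'}, {'local': 'o', 'title': 'Overview'}, {'local': 'a', 'title': 'alpha'}]
_overview = [d for d in _sample if d['title'].lower() == 'overview']
_rest = sorted((d for d in _sample if d['title'].lower() != 'overview'), key=lambda s: s['title'].lower())
assert [d['local'] for d in _overview + _rest] == ['o', 'a', 'b']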
def lowercase__ ( _UpperCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
with open(_A , encoding='utf-8' ) as f:
lowercase : int = yaml.safe_load(f.read() )
# Get to the API doc
lowercase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase : Any = content[api_idx]['sections']
# Then to the model doc
lowercase : List[str] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase : List[str] = api_doc[scheduler_idx]['sections']
lowercase : Union[str, Any] = clean_doc_toc(_A )
lowercase : Any = False
if new_scheduler_doc != scheduler_doc:
lowercase : Any = True
if overwrite:
lowercase : Tuple = new_scheduler_doc
if diff:
if overwrite:
lowercase : Dict = api_doc
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
            'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def lowercase__ ( _UpperCAmelCase=False ) -> str:
'''simple docstring'''
with open(_A , encoding='utf-8' ) as f:
lowercase : str = yaml.safe_load(f.read() )
# Get to the API doc
lowercase : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase : Dict = content[api_idx]['sections']
# Then to the model doc
lowercase : List[str] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase : int = False
lowercase : Optional[int] = api_doc[pipeline_idx]['sections']
lowercase : Union[str, Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase : Any = pipeline_doc['section']
lowercase : Optional[Any] = clean_doc_toc(_A )
if overwrite:
lowercase : int = new_sub_pipeline_doc
new_pipeline_docs.append(_A )
# sort overall pipeline doc
lowercase : Dict = clean_doc_toc(_A )
if new_pipeline_docs != pipeline_docs:
lowercase : Optional[Any] = True
if overwrite:
lowercase : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
lowercase : Dict = api_doc
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
            'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_UpperCamelCase: Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_UpperCamelCase: Union[str, Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 255 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase: Union[str, Any] = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: List[Any] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Any = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase: Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 | 0 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__lowercase = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
__lowercase = json.load(__snake_case )
else:
try:
__lowercase = baseaa.urlsafe_baadecode(__snake_case ).decode('''utf-8''' )
__lowercase = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" )
__lowercase = config
self.set_stage_and_offload()
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_value('''zero_optimization.stage''' , -1 )
# offload
__lowercase = False
if self.is_zeroa() or self.is_zeroa():
__lowercase = set(['''cpu''', '''nvme'''] )
__lowercase = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__lowercase = True
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
__lowercase = self.config
# find the config node of interest if it exists
__lowercase = ds_key_long.split('''.''' )
__lowercase = nodes.pop()
for node in nodes:
__lowercase = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> int:
'''simple docstring'''
__lowercase = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
'''simple docstring'''
__lowercase = self.config
# find the config node of interest if it exists
__lowercase = ds_key_long.split('''.''' )
for node in nodes:
__lowercase = config
__lowercase = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"Can't find {ds_key_long} entry in the config: {self.config}" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
return self._stage == 2
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return self._stage == 3
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
return self._offload
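# A minimal sketch (not part of the original module) of the dotted-path lookup
# implemented by `find_config_node`/`get_value` above: descend one dict level
# per '.'-separated segment and fall back to a default on a missing segment.
def _dotted_get_sketch(config, ds_key_long, default=None):
    node = config
    for key in ds_key_long.split('''.'''):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

assert _dotted_get_sketch({'''zero_optimization''': {'''stage''': 3}}, '''zero_optimization.stage''') == 3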
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
__lowercase = engine
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
__lowercase = hasattr(self.optimizer , '''overflow''' )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__=None ) -> Optional[int]:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(__snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=0.001 , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = params
__lowercase = lr
__lowercase = weight_decay
__lowercase = kwargs
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = optimizer
__lowercase = total_num_steps
__lowercase = warmup_num_steps
__lowercase = kwargs | 210 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase: str = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Optional[Any] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__A : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase__ )
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : str = 42
SCREAMING_SNAKE_CASE_ : List[Any] = 42
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
@dataclass(frozen=lowerCamelCase__ )
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : List[Any] = 42
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( lowerCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 42
def __init__( self : Union[str, Any] , A : str , A : PreTrainedTokenizer , A : str , A : Optional[int] = None , A : Tuple=False , A : bool = False , ) -> Any:
lowercase_ : Optional[Any] = hans_processors[task]()
lowercase_ : List[str] = os.path.join(
__snake_case , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__snake_case ) , __snake_case , ) , )
lowercase_ : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowercase_ : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase_ : Dict = cached_features_file + '.lock'
with FileLock(__snake_case ):
if os.path.exists(__snake_case ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
lowercase_ : List[Any] = torch.load(__snake_case )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
lowercase_ : Optional[Any] = (
processor.get_dev_examples(__snake_case ) if evaluate else processor.get_train_examples(__snake_case )
)
logger.info('''Training examples: %s''' , len(__snake_case ) )
lowercase_ : Optional[int] = hans_convert_examples_to_features(__snake_case , __snake_case , __snake_case , __snake_case )
logger.info('''Saving features into cached file %s''' , __snake_case )
torch.save(self.features , __snake_case )
def __len__( self : List[Any] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Optional[int] , A : List[str] ) -> int:
return self.features[i]
def A ( self : Optional[int] ) -> int:
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
SCREAMING_SNAKE_CASE_ : Optional[Any] = 42
def __init__( self : str , A : str , A : PreTrainedTokenizer , A : str , A : Optional[int] = 1_28 , A : Optional[Any]=False , A : bool = False , ) -> List[str]:
lowercase_ : Union[str, Any] = hans_processors[task]()
lowercase_ : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
lowercase_ : Any = label_list
lowercase_ : Optional[int] = processor.get_dev_examples(__snake_case ) if evaluate else processor.get_train_examples(__snake_case )
lowercase_ : List[Any] = hans_convert_examples_to_features(__snake_case , __snake_case , __snake_case , __snake_case )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__snake_case )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase_ : Dict = tf.data.Dataset.from_generator(
__snake_case , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A ( self : str ) -> List[Any]:
return self.dataset
def __len__( self : int ) -> List[str]:
return len(self.features )
def __getitem__( self : Optional[Any] , A : Optional[int] ) -> Tuple:
return self.features[i]
def A ( self : Union[str, Any] ) -> Optional[Any]:
return self.label_list
class _UpperCAmelCase ( lowerCamelCase__ ):
def A ( self : List[Any] , A : Union[str, Any] ) -> List[Any]:
return self._create_examples(self._read_tsv(os.path.join(__snake_case , '''heuristics_train_set.txt''' ) ) , '''train''' )
def A ( self : List[Any] , A : Any ) -> str:
return self._create_examples(self._read_tsv(os.path.join(__snake_case , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def A ( self : Union[str, Any] ) -> Dict:
return ["contradiction", "entailment", "neutral"]
def A ( self : Dict , A : Tuple , A : Tuple ) -> List[str]:
lowercase_ : Union[str, Any] = []
for i, line in enumerate(__snake_case ):
if i == 0:
continue
lowercase_ : Optional[Any] = '%s-%s' % (set_type, line[0])
lowercase_ : Dict = line[5]
lowercase_ : str = line[6]
lowercase_ : Union[str, Any] = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
lowercase_ : Optional[Any] = line[0]
examples.append(InputExample(guid=__snake_case , text_a=__snake_case , text_b=__snake_case , label=__snake_case , pairID=__snake_case ) )
return examples
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[Any] , ):
lowercase_ : Optional[int] = {label: i for i, label in enumerate(_A )}
lowercase_ : List[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(_A ) , desc='''convert examples to features''' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('''Writing example %d''' % (ex_index) )
lowercase_ : Dict = tokenizer(
example.text_a , example.text_b , add_special_tokens=_A , max_length=_A , padding='''max_length''' , truncation=_A , return_overflowing_tokens=_A , )
lowercase_ : List[Any] = label_map[example.label] if example.label in label_map else 0
lowercase_ : Optional[int] = int(example.pairID )
features.append(InputFeatures(**_A , label=_A , pairID=_A ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
__A : List[Any] = {
'hans': 3,
}
__A : Dict = {
'hans': HansProcessor,
}
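
# Hedged usage sketch for the HANS utilities above, using the names they carry
# in the original transformers example (`HansProcessor`,
# `hans_convert_examples_to_features`); the data directory is illustrative.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = HansProcessor()
examples = processor.get_dev_examples("./hans_data")  # reads heuristics_evaluation_set.txt
features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)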
| 33 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase: Dict = logging.get_logger(__name__)
lowerCAmelCase: str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCAmelCase: str = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LEDTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Tuple=None , __snake_case : Dict="replace" , __snake_case : int="<s>" , __snake_case : Any="</s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[Any]="<s>" , __snake_case : Optional[Any]="<unk>" , __snake_case : List[str]="<pad>" , __snake_case : int="<mask>" , __snake_case : int=False , __snake_case : str=True , **__snake_case : Tuple , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , )
a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : List[Any] = getattr(__snake_case , pre_tok_state.pop('type' ) )
a : Optional[Any] = add_prefix_space
a : Optional[Any] = pre_tok_class(**__snake_case )
a : Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
a : Dict = 'post_processor'
a : int = getattr(self.backend_tokenizer , __snake_case , __snake_case )
if tokenizer_component_instance:
a : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
if "sep" in state:
a : Any = tuple(state['sep'] )
if "cls" in state:
a : Any = tuple(state['cls'] )
a : Optional[Any] = False
if state.get('add_prefix_space' , __snake_case ) != add_prefix_space:
a : Any = add_prefix_space
a : Optional[Any] = True
if state.get('trim_offsets' , __snake_case ) != trim_offsets:
a : List[Any] = trim_offsets
a : Union[str, Any] = True
if changes_to_apply:
a : int = getattr(__snake_case , state.pop('type' ) )
a : List[Any] = component_class(**__snake_case )
setattr(self.backend_tokenizer , __snake_case , __snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowercase_ ( self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , __snake_case : List[str] ):
a : Tuple = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value
a : Optional[int] = value
def lowercase_ ( self : Optional[Any] , *__snake_case : Any , **__snake_case : Union[str, Any] ):
a : Dict = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : List[str] ):
a : Optional[int] = kwargs.get('is_split_into_words' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*__snake_case , **__snake_case )
def lowercase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
a : Union[str, Any] = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : int=None ):
a : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : int = [self.sep_token_id]
a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self : List[str] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
a : Optional[Any] = super()._pad(
encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
# Load from model defaults
if return_attention_mask is None:
a : str = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a : Any = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
a : Union[str, Any] = len(encoded_inputs['global_attention_mask'] ) != len(__snake_case )
if needs_to_be_padded:
a : str = len(__snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a : Dict = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a : Union[str, Any] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs | 297 | 0 |
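
# Hedged sketch exercising the `global_attention_mask` padding above; `-1`
# marks padded (local-attention) positions, as implemented in `_pad`. The
# checkpoint is the real LED base model; the sentence is illustrative.
from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tok("long document about global attention")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = tok.pad(enc, padding="max_length", max_length=32)
assert padded["global_attention_mask"][-1] == -1  # right-padded with local attention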
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 5_0_2_5_7 , UpperCamelCase__ : int = 1_0_2_4 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : int = 1_2 , UpperCamelCase__ : int = 1_2 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
                f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
UpperCamelCase = prefix_inner_dim
UpperCamelCase = prefix_hidden_dim
UpperCamelCase = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCamelCase = (
nn.Linear(self.prefix_hidden_dim , __snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
UpperCamelCase = GPTaConfig(
vocab_size=__snake_case , n_positions=__snake_case , n_embd=__snake_case , n_layer=__snake_case , n_head=__snake_case , n_inner=__snake_case , activation_function=__snake_case , resid_pdrop=__snake_case , embd_pdrop=__snake_case , attn_pdrop=__snake_case , layer_norm_epsilon=__snake_case , initializer_range=__snake_case , scale_attn_weights=__snake_case , use_cache=__snake_case , scale_attn_by_inverse_layer_idx=__snake_case , reorder_and_upcast_attn=__snake_case , )
UpperCamelCase = GPTaLMHeadModel(__snake_case )
def A ( self : List[Any] , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
"""simple docstring"""
UpperCamelCase = self.transformer.transformer.wte(__snake_case )
UpperCamelCase = self.encode_prefix(__snake_case )
UpperCamelCase = self.decode_prefix(__snake_case )
UpperCamelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
UpperCamelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
UpperCamelCase = torch.cat((dummy_token, input_ids) , dim=1 )
UpperCamelCase = self.transformer(inputs_embeds=__snake_case , labels=__snake_case , attention_mask=__snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def A ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
"""simple docstring"""
return torch.zeros(__snake_case , self.prefix_length , dtype=torch.intaa , device=__snake_case )
def A ( self : Dict , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
return self.encode_prefix(__snake_case )
@torch.no_grad()
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = torch.split(__snake_case , 1 , dim=0 )
UpperCamelCase = []
UpperCamelCase = []
for feature in features:
UpperCamelCase = self.decode_prefix(feature.to(__snake_case ) ) # back to the clip feature
# Only support beam search for now
UpperCamelCase = self.generate_beam(
input_embeds=__snake_case , device=__snake_case , eos_token_id=__snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
UpperCamelCase = torch.stack(__snake_case )
UpperCamelCase = torch.stack(__snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def A ( self : Dict , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 6_7 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
UpperCamelCase = eos_token_id
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = torch.ones(__snake_case , device=__snake_case , dtype=torch.int )
UpperCamelCase = torch.zeros(__snake_case , device=__snake_case , dtype=torch.bool )
if input_embeds is not None:
UpperCamelCase = input_embeds
else:
UpperCamelCase = self.transformer.transformer.wte(__snake_case )
for i in range(__snake_case ):
UpperCamelCase = self.transformer(inputs_embeds=__snake_case )
UpperCamelCase = outputs.logits
UpperCamelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCamelCase = logits.softmax(-1 ).log()
if scores is None:
UpperCamelCase = logits.topk(__snake_case , -1 )
UpperCamelCase = generated.expand(__snake_case , *generated.shape[1:] )
UpperCamelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
UpperCamelCase = next_tokens
else:
UpperCamelCase = tokens.expand(__snake_case , *tokens.shape[1:] )
UpperCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
else:
UpperCamelCase = -float(np.inf )
UpperCamelCase = 0
UpperCamelCase = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
UpperCamelCase = scores_sum / seq_lengths[:, None]
UpperCamelCase = scores_sum_average.view(-1 ).topk(__snake_case , -1 )
UpperCamelCase = next_tokens // scores_sum.shape[1]
UpperCamelCase = seq_lengths[next_tokens_source]
UpperCamelCase = next_tokens % scores_sum.shape[1]
UpperCamelCase = next_tokens.unsqueeze(1 )
UpperCamelCase = tokens[next_tokens_source]
UpperCamelCase = torch.cat((tokens, next_tokens) , dim=1 )
UpperCamelCase = generated[next_tokens_source]
UpperCamelCase = scores_sum_average * seq_lengths
UpperCamelCase = is_stopped[next_tokens_source]
UpperCamelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
UpperCamelCase = torch.cat((generated, next_token_embed) , dim=1 )
UpperCamelCase = is_stopped + next_tokens.eq(__snake_case ).squeeze()
if is_stopped.all():
break
UpperCamelCase = scores / seq_lengths
UpperCamelCase = scores.argsort(descending=__snake_case )
# tokens tensors are already padded to max_seq_length
UpperCamelCase = [tokens[i] for i in order]
UpperCamelCase = torch.stack(__snake_case , dim=0 )
UpperCamelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
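
# Hedged sketch of the prefix-conditioning idea implemented above: project a
# feature vector into GPT-2's embedding space and decode from the concatenated
# sequence. The random prefix is a stand-in for the pipeline's real encoded
# image features; dimensions are illustrative.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

gpt2 = GPT2LMHeadModel.from_pretrained("gpt2")
gpt2_tok = GPT2Tokenizer.from_pretrained("gpt2")
prefix_embeds = torch.randn(1, 4, gpt2.config.n_embd)  # stand-in for encoded features
token_ids = gpt2_tok("A photo of", return_tensors="pt").input_ids
text_embeds = gpt2.transformer.wte(token_ids)
out = gpt2(inputs_embeds=torch.cat([prefix_embeds, text_embeds], dim=1))
print(out.logits.shape)  # (1, 4 + seq_len, vocab_size)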
| 28 |
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class a__:
def __init__( self : Tuple ):
a : Optional[int] = ''
a : Optional[Any] = ''
a : str = []
a : int = 0
a : str = 2_56
a : Union[str, Any] = 0
a : Any = 0
a : Optional[int] = 0
a : List[str] = 0
def lowercase_ ( self : str , __snake_case : str ):
a : Any = cva.imread(__snake_case , 0 )
a : Optional[Any] = copy.deepcopy(self.img )
        a , a , a = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x' )
a : Optional[int] = np.sum(__snake_case )
for i in range(len(__snake_case ) ):
a : Optional[Any] = x[i] / self.k
self.sk += prk
a : str = (self.L - 1) * self.sk
if self.rem != 0:
a : Optional[int] = int(last % last )
a : int = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(__snake_case )
a : str = int(np.ma.count(self.img ) / self.img[1].size )
a : Optional[int] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
a : Any = self.img[j][i]
if num != self.last_list[num]:
a : str = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img )
def lowercase_ ( self : Dict ):
plt.hist(self.img.ravel() , 2_56 , [0, 2_56] )
def lowercase_ ( self : List[Any] ):
cva.imshow('Output-Image' , self.img )
cva.imshow('Input-Image' , self.original_image )
cva.waitKey(50_00 )
cva.destroyAllWindows()
if __name__ == "__main__":
    lowerCAmelCase: Optional[Any] = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
lowerCAmelCase: Tuple = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 297 | 0 |
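
# A vectorized sketch of the mapping built in `stretch` above (standard
# histogram equalization for an 8-bit grayscale image; `img` is assumed to be
# a uint8 numpy array such as the one returned by cva.imread(..., 0)).
import numpy as np

def equalize(img: np.ndarray) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=256)
    cdf = np.cumsum(hist) / hist.sum()         # cumulative p_r(k), i.e. self.sk
    lut = np.rint(255 * cdf).astype(np.uint8)  # s_k = (L - 1) * sum(p_r)
    return lut[img]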
import os
import numpy
import onnx
def lowercase_ ( _A : Any , _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Any = a.name
lowerCamelCase__ : Tuple = b.name
lowerCamelCase__ : List[Any] = ''
lowerCamelCase__ : Union[str, Any] = ''
lowerCamelCase__ : Tuple = a == b
lowerCamelCase__ : List[Any] = name_a
lowerCamelCase__ : List[Any] = name_b
return res
def lowercase_ ( _A : Union[str, Any] , _A : Dict , _A : Tuple ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_A , _A )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
_graph_replace_input_with(node_proto.attribute[1].g , _A , _A )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
def lowercase_ ( _A : Union[str, Any] , _A : str , _A : Union[str, Any] ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(_A , _A , _A )
def lowercase_ ( _A : Dict , _A : Dict , _A : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = list(model.graph.initializer )
lowerCamelCase__ : int = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase__ : int = inits[i].name
lowerCamelCase__ : List[Any] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _A , _A )
def lowercase_ ( _A : Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[str] = os.path.dirname(_A )
lowerCamelCase__ : Dict = os.path.basename(_A )
lowerCamelCase__ : List[str] = onnx.load(os.path.join(_A , _A ) )
lowerCamelCase__ : Union[str, Any] = list(model.graph.initializer )
lowerCamelCase__ : Optional[Any] = set()
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Dict = 0
for i in range(len(_A ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_A ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_A )
dup_set.add(_A )
lowerCamelCase__ : Any = inits[j].data_type
lowerCamelCase__ : List[str] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , _A )
total_reduced_size += mem_size
lowerCamelCase__ : Optional[Any] = inits[i].name
lowerCamelCase__ : str = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_A )
else:
lowerCamelCase__ : int = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
lowerCamelCase__ : int = sorted(_A )
_remove_dup_initializers_from_model(_A , _A , _A )
lowerCamelCase__ : int = 'optimized_' + model_file_name
lowerCamelCase__ : Optional[Any] = os.path.join(_A , _A )
onnx.save(_A , _A )
return new_model
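
# Hedged usage sketch; `remove_dup_initializers` is the name this entry point
# has in the original script, and the model path is illustrative.
import onnx

optimized_path = remove_dup_initializers("models/encoder.onnx")
onnx.checker.check_model(onnx.load(optimized_path))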
| 184 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
if isinstance(__snake_case , __snake_case ):
            # Don't modify the user's data should they want to reuse it (e.g. in tests), because once we
            # have modified it, it will not be accepted here again, since `auto` values would have been overridden
a : str = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
a : Optional[Any] = json.load(__snake_case )
else:
try:
a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
a : Union[str, Any] = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
a : List[str] = config
self.set_stage_and_offload()
def lowercase_ ( self : List[str] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
a : Dict = self.get_value('zero_optimization.stage' , -1 )
# offload
a : str = False
if self.is_zeroa() or self.is_zeroa():
a : Union[str, Any] = set(['cpu', 'nvme'] )
a : Optional[Any] = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
a : List[str] = True
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a : str = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
a : Dict = nodes.pop()
for node in nodes:
a : List[Any] = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
        a , a = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
a : Optional[Any] = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
for node in nodes:
a : str = config
a : Dict = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
a : Union[str, Any] = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def lowercase_ ( self : Optional[Any] ):
return self._stage == 2
def lowercase_ ( self : Union[str, Any] ):
return self._stage == 3
def lowercase_ ( self : str ):
return self._offload
class a__:
def __init__( self : Tuple , __snake_case : str ):
a : Optional[Any] = engine
def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ):
# runs backpropagation and handles mixed precision
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : List[str] ):
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
a : Optional[Any] = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self : Dict , __snake_case : Dict=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self : Tuple ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a__( lowerCamelCase__ ):
def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
super().__init__(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
a : Optional[Any] = params
a : str = lr
a : List[str] = weight_decay
a : str = kwargs
class a__:
def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
a : Union[str, Any] = optimizer
a : Any = total_num_steps
a : List[str] = warmup_num_steps
a : int = kwargs | 297 | 0 |
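
# Hedged sketch of how the config wrapper above is queried (method names as in
# accelerate's real HfDeepSpeedConfig; the config dict is illustrative):
from accelerate.utils.deepspeed import HfDeepSpeedConfig

ds_config = HfDeepSpeedConfig(
    {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
)
assert ds_config.is_zero3() and ds_config.is_offload()
print(ds_config.get_value("zero_optimization.offload_param.device"))  # -> "cpu"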
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
lowercase_ = ["image_processor", "tokenizer"]
lowercase_ = "AutoImageProcessor"
lowercase_ = "AutoTokenizer"
def __init__( self : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ):
super().__init__(__snake_case , __snake_case )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : Optional[int] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=None , **_lowerCAmelCase : int ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(__snake_case , return_tensors=__snake_case , **__snake_case )
if images is not None:
SCREAMING_SNAKE_CASE_ = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def lowerCAmelCase_ ( self : Union[str, Any] , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[str] ):
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase_ ( self : List[Any] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : int ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowerCAmelCase_ ( self : List[str] ):
return ["input_ids", "attention_mask", "pixel_values"] | 225 |
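
# Hedged usage sketch; the processor above mirrors transformers' GitProcessor,
# so the published GIT checkpoint is assumed here only for illustration.
import numpy as np
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("microsoft/git-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a caption", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']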
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase: int = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class a__( unittest.TestCase ):
def lowercase_ ( self : int , __snake_case : str , __snake_case : bool , __snake_case : str = None , __snake_case : list = None ):
a : Optional[int] = None
a : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
a : List[str] = os.path.abspath('examples' )
for item in os.listdir(__snake_case ):
if item not in EXCLUDE_EXAMPLES:
a : int = os.path.join(__snake_case , __snake_case )
if os.path.isfile(__snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=__snake_case , feature_script=__snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
a : List[Any] = compare_against_test(
os.path.join(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case )
a : Union[str, Any] = '\n'.join(__snake_case )
if special_strings is not None:
for string in special_strings:
a : Union[str, Any] = diff.replace(__snake_case , '' )
self.assertEqual(__snake_case , '' )
def lowercase_ ( self : Optional[Any] ):
self.one_complete_example('complete_nlp_example.py' , __snake_case )
self.one_complete_example('complete_nlp_example.py' , __snake_case )
def lowercase_ ( self : Any ):
a : Dict = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
a : int = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
self.one_complete_example('complete_cv_example.py' , __snake_case , __snake_case , __snake_case )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class a__( lowerCamelCase__ ):
lowercase__ = False
@classmethod
def lowercase_ ( cls : Optional[int] ):
super().setUpClass()
a : List[str] = tempfile.mkdtemp()
a : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
a : Optional[int] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowercase_ ( cls : Optional[int] ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowercase_ ( self : Tuple ):
a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def lowercase_ ( self : Dict ):
a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
a : int = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def lowercase_ ( self : Any ):
a : Tuple = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
a : int = run_command(self._launch_args + testargs , return_stdout=__snake_case )
self.assertNotIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
a : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=__snake_case )
if torch.cuda.is_available():
a : Any = torch.cuda.device_count()
else:
a : str = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
else:
self.assertIn('epoch 0:' , __snake_case )
self.assertIn('epoch 1:' , __snake_case )
@slow
def lowercase_ ( self : Tuple ):
a : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
a : Any = run_command(self._launch_args + testargs , return_stdout=__snake_case )
a : Optional[Any] = re.findall('({.+})' , __snake_case )
a : str = [r for r in results if 'accuracy' in r][-1]
a : str = ast.literal_eval(__snake_case )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def lowercase_ ( self : Optional[int] ):
a : int = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def lowercase_ ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
a : Optional[Any] = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__snake_case , 'tracking' ) ) )
def lowercase_ ( self : List[str] ):
a : Optional[Any] = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def lowercase_ ( self : int ):
a : Optional[Any] = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs ) | 297 | 0 |
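
# Hedged sketch of the command `run_command(self._launch_args + testargs)`
# assembles for the first checkpointing test above (config path and output
# directory are illustrative):
import subprocess

subprocess.run(
    ["accelerate", "launch", "--config_file", "default_config.yml",
     "examples/by_feature/checkpointing.py",
     "--checkpointing_steps", "epoch", "--output_dir", "outputs"],
    check=True,
)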
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase__ :str = logging.get_logger(__name__)
lowercase__ :int = TypeVar("DatasetType", Dataset, IterableDataset)
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.' )
if i == 0:
lowercase = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
else:
return _interleave_iterable_datasets(
_A , _A , _A , info=_A , split=_A , stopping_strategy=_A )
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_A ):
if not isinstance(_A , (Dataset, IterableDataset) ):
if isinstance(_A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_A )}\n'
                f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_A ).__name__}.' )
if i == 0:
lowercase = (
(Dataset, IterableDataset) if isinstance(_A , _A ) else (IterableDataset, Dataset)
)
elif not isinstance(_A , _A ):
raise ValueError(
                f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_A , info=_A , split=_A , axis=_A )
else:
return _concatenate_iterable_datasets(_A , info=_A , split=_A , axis=_A )
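
# Hedged usage sketch; the two functions above correspond to
# `datasets.interleave_datasets` and `datasets.concatenate_datasets`.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11]})
mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42,
                            stopping_strategy="all_exhausted")
both = concatenate_datasets([d1, d2])
print(len(both))  # 5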
| 101 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
a : Union[str, Any] = tokenizer
a : Union[str, Any] = dataset
a : Any = len(__snake_case ) if n_tasks is None else n_tasks
a : List[str] = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
a : Dict = start_length
a : Dict = eof_strings
a : str = tokenizer
def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def lowerCamelCase__ ( _A ):
a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
a : Optional[Any] = batch['ids'].shape[-1]
a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
a : Tuple = batch['task_id'].repeat(_A )
a : List[Any] = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
            a , a = accelerator.gather((generated_tokens, generated_tasks) )
a : List[str] = generated_tokens.cpu().numpy()
a : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
a : Any = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def lowerCamelCase__ ( ):
# Setup configuration
a : Dict = HfArgumentParser(_A )
a : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
    a , a = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
        a , a = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 297 | 0 |
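
# Hedged sketch of the `code_eval` metric the script above relies on; it
# executes model-generated code, hence the explicit opt-in flag. The tiny
# reference/prediction pair is illustrative.
import os

os.environ["HF_ALLOW_CODE_EVAL"] = "1"
from datasets import load_metric

metric = load_metric("code_eval")
pass_at_k, _ = metric.compute(
    references=["assert add(1, 2) == 3"],
    predictions=[["def add(a, b):\n    return a + b"]],
    k=[1],
)
print(pass_at_k)  # {'pass@1': 1.0}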
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _lowerCamelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCAmelCase_ = BertGenerationTokenizer
UpperCAmelCase_ = False
UpperCAmelCase_ = True
def snake_case_ (self ) -> Dict:
super().setUp()
UpperCamelCase = BertGenerationTokenizer(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ (self ) -> List[str]:
UpperCamelCase = '<s>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__snake_case ) , 10_02 )
def snake_case_ (self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = BertGenerationTokenizer(__snake_case , keep_accents=__snake_case )
UpperCamelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__snake_case , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCamelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def snake_case_ (self ) -> Dict:
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = 'Hello World!'
UpperCamelCase = [1_85_36, 22_60, 1_01]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
UpperCamelCase = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@require_torch
@slow
def snake_case_ (self ) -> Dict:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCamelCase = ' '.join(__snake_case )
UpperCamelCase = self.big_tokenizer.encode_plus(__snake_case , return_tensors="pt" , return_token_type_ids=__snake_case )
UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__snake_case )
UpperCamelCase = BertGenerationConfig()
UpperCamelCase = BertGenerationEncoder(__snake_case )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__snake_case )
model(**__snake_case )
@slow
def snake_case_ (self ) -> Any:
# fmt: off
UpperCamelCase = {'input_ids': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
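
# Hedged sketch reproducing the slow test above outside the test harness
# (checkpoint name taken from the test itself):
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
print(tok.encode("Hello World!"))  # expected [18536, 2260, 101] per the slow test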
| 153 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase__ ( _A , _A , _A ):
if isinstance(_A , torch.Tensor ):
return image
elif isinstance(_A , PIL.Image.Image ):
a : Any = [image]
if isinstance(image[0] , PIL.Image.Image ):
a : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a : int = np.concatenate(_A , axis=0 )
a : int = np.array(_A ).astype(np.floataa ) / 255.0
a : str = image.transpose(0 , 3 , 1 , 2 )
a : str = 2.0 * image - 1.0
a : Optional[int] = torch.from_numpy(_A )
elif isinstance(image[0] , torch.Tensor ):
a : Optional[Any] = torch.cat(_A , dim=0 )
return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are almost parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
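# Sketch of the expected behaviour (hypothetical values, not part of the original file):
# for orthogonal unit vectors, slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
# returns roughly [0.7071, 0.7071], i.e. the midpoint on the unit circle.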
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
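# For unit vectors at angle theta, ||x - y|| = 2 * sin(theta / 2), so the expression
# above evaluates to 2 * (theta / 2) ** 2 = theta ** 2 / 2, i.e. a squared geodesic
# distance on the unit sphere up to a constant factor.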
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)
        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
        # add noise to the initial latents at the given timestep
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents, timestep)
        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)
        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss, latents)[0]
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)
        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )
        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )
                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None) | 297 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
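# e.g. downscale_height_and_width(512, 512, scale_factor=8) returns (64, 64):
# the requested pixel size is mapped onto the latent grid, rounding any partial
# block up to a full multiple of the scale factor.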
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 4 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
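# With the demo values below the densities sum to one (dark_energy = 1 - matter_density,
# and the curvature term absorbs the small radiation contribution), so E(z=0) = 1 and
# the call prints the input Hubble constant, 68.3 km/s/Mpc.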
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 297 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
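# Minimal usage sketch, mirroring the docstring example above:
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"], references=["hello there"])
#   results["rouge1"].mid.fmeasure  # -> 1.0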
| 73 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # push any pending lazy assignment down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # push any pending lazy assignment down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
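# Lazy propagation defers range assignments: `lazy` holds a pending value and `flag`
# marks nodes whose children still need it, so `update` and `query` push pending
# values one level down on demand and run in O(log n) instead of touching every leaf.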
if __name__ == "__main__":
lowerCAmelCase: Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
lowerCAmelCase: int = 1_5
lowerCAmelCase: Optional[int] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt) | 297 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 296 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    # repeatedly fold the carry bits back in until no carry remains
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
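# Example trace for add(3, 5): XOR gives the carry-less sum, AND << 1 the carry:
#   3 ^ 5 = 6, carry (3 & 5) << 1 = 2
#   6 ^ 2 = 4, carry (6 & 2) << 1 = 4
#   4 ^ 4 = 0, carry (4 & 4) << 1 = 8
#   0 ^ 8 = 8, carry 0  ->  returns 8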
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F"{add(first, second) = }") | 297 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    '''simple docstring'''

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
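# estimate_pass_at_k computes the unbiased estimator from the HumanEval paper:
#   pass@k = 1 - C(n - c, k) / C(n, k)
# written as a numerically stable product. E.g. n=2 samples with c=1 passing gives
# pass@1 = 0.5 and pass@2 = 1.0, matching the docstring example above.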
| 255 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def lowerCamelCase__ ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase__ ( _A ):
return json.load(_A )
def lowerCamelCase__ ( _A ):
return [json.loads(_A ) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
a : List[str] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : List[Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowercase_ ( self : List[str] , __snake_case : str ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
assert exported_content == original_content | 297 | 0 |
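# Hedged sketch (not part of the original tests): the fsspec round-trip that the
# compression test above relies on. `compression='infer'` is a real fsspec option
# that picks the codec from the file extension, so gzip-compressed JSON reads
# back to the exact bytes that were written.
import gzip
import json
import os
import tempfile

import fsspec

payload = json.dumps({'col_1': 'a', 'col_2': 1}).encode('utf-8')
path = os.path.join(tempfile.mkdtemp(), 'test.json.gz')
with gzip.open(path, 'wb') as f:
    f.write(payload)
with fsspec.open(path, 'rb', compression='infer') as f:
    assert f.read() == payload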
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__a : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_A ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__lowercase = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
__lowercase = PipelineDataFormat.from_str(
format=_A , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_A , _A )
class _UpperCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
__lowercase = nlp
__lowercase = reader
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ) -> Any:
'''simple docstring'''
__lowercase = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=__snake_case , help='''Path to the file to use for inference''' )
        run_parser.add_argument('''--output''' , type=__snake_case , help='''Path to the file where the results will be written.''' )
run_parser.add_argument('''--model''' , type=__snake_case , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=__snake_case , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=__snake_case , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
            '''--column''' , type=__snake_case , help='''Name of the column to use as input. (For multi-column input such as QA, use column1,column2.)''' , )
run_parser.add_argument(
'''--format''' , type=__snake_case , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
            '''--device''' , type=__snake_case , default=-1 , help='''Device to run on: -1 for CPU, >= 0 for a GPU index (default: -1).''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=__snake_case )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self._nlp, []
for entry in self._reader:
__lowercase = nlp(**__snake_case ) if self._reader.is_multi_columns else nlp(__snake_case )
if isinstance(__snake_case , __snake_case ):
outputs.append(__snake_case )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__lowercase = self._reader.save_binary(__snake_case )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(__snake_case ) | 210 |
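# Hedged sketch (illustrative, not the transformers API): a dependency-free
# re-implementation of the extension-based format inference above. The real code
# walks PipelineDataFormat.SUPPORTED_FORMATS; the list below is an assumed
# stand-in for illustration.
ASSUMED_SUPPORTED_FORMATS = ['json', 'csv', 'pipe']

def infer_format_from_ext(path):
    if not path:
        return 'pipe'
    for ext in ASSUMED_SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise ValueError(f'Unable to determine file format from file extension {path}.')

assert infer_format_from_ext('') == 'pipe'
assert infer_format_from_ext('data/input.csv') == 'csv'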
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase__ ( _A = "laptop" ):
a : Any = f"""https://www.amazon.in/laptop/s?k={product}"""
a : Tuple = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
a : Any = BeautifulSoup(requests.get(_A , headers=_A ).text )
# Initialize a Pandas dataframe with the column titles
a : Any = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
a : Optional[int] = item.ha.text
a : str = 'https://www.amazon.in/' + item.ha.a['href']
a : List[str] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
a : Optional[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
a : Union[str, Any] = 'Not available'
try:
a : str = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
a : int = ''
try:
a : Union[str, Any] = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
a : Any = float('nan' )
except AttributeError:
pass
a : Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
a : Any = ' '
a : List[str] = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCAmelCase: str = 'headphones'
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv") | 297 | 0 |
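# Hedged sketch (illustration only): the discount figure computed in the scraper
# above is (MRP - price) / MRP * 100 after stripping the rupee sign and the
# comma separators from both amounts.
def discount_percent(mrp, price):
    def clean(amount):
        return float(amount.strip('₹').replace(',', ''))

    return (clean(mrp) - clean(price)) / clean(mrp) * 100

assert round(discount_percent('₹1,000', '₹750'), 1) == 25.0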
"""simple docstring"""
def lowercase ( __snake_case : Dict ):
lowercase_ : str = [0] * len(_A )
lowercase_ : Optional[Any] = []
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_A ) ):
if indegree[i] == 0:
queue.append(_A )
while queue:
lowercase_ : Tuple = queue.pop(0 )
cnt += 1
topo.append(_A )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_A )
if cnt != len(_A ):
print('''Cycle exists''' )
else:
print(_A )
# Adjacency List of Graph
__A : Union[str, Any] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 33 |
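# Hedged verification sketch (not part of the original sample): a valid output of
# Kahn's algorithm places every vertex after all of its predecessors. For the
# adjacency list above, [0, 1, 2, 3, 4, 5] is one such order.
def is_topological_order(adjacency, order):
    position = {vertex: index for index, vertex in enumerate(order)}
    return all(position[u] < position[v] for u in adjacency for v in adjacency[u])

assert is_topological_order(
    {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}, [0, 1, 2, 3, 4, 5]
)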
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def lowercase_ ( self : int ):
a : Dict = 32
a : str = embedder_hidden_size
# image encoding components
a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
a : List[Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL()
a : str = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ):
if str(__snake_case ).startswith('mps' ):
a : Tuple = torch.manual_seed(__snake_case )
else:
a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
a : Optional[Any] = input_image * 0.5 + 0.5
a : Optional[Any] = input_image.clamp(0 , 1 )
a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : Union[str, Any] = self.get_dummy_components()
a : Any = StableUnCLIPImgaImgPipeline(**__snake_case )
a : Tuple = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
a : Union[str, Any] = self.get_dummy_inputs(__snake_case )
inputs.update({'image_embeds': None} )
a : str = sd_pipe(**__snake_case ).images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : Optional[int] = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
        a : str = pipe(__snake_case , 'anime turtle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 297 | 0 |
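# Hedged sketch of the peak-memory measurement pattern used in the test above:
# reset the CUDA statistics, run the workload, then read the high-water mark.
# reset_peak_memory_stats() and max_memory_allocated() are real PyTorch APIs;
# the matmul below is only a stand-in for the pipeline call.
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.randn(1024, 1024, device='cuda') @ torch.randn(1024, 1024, device='cuda')
    print(f'peak allocation: {torch.cuda.max_memory_allocated() / 2**20:.1f} MiB')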
'''simple docstring'''
import os
from pathlib import Path
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCamelCase = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
UpperCamelCase = F"""{src_lang}-{tgt_lang}"""
UpperCamelCase = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=_A , exist_ok=_A )
UpperCamelCase = os.path.join(_A , 'README.md' )
print(F"""Generating {path}""" )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(_A )
# make sure we are under the root of the project
_lowerCamelCase : int = Path(__file__).resolve().parent.parent.parent
_lowerCamelCase : List[str] = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_lowerCamelCase : Optional[int] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 28 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase: List[str] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """t5"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Union[str, Any] , __snake_case : int=3_21_28 , __snake_case : str=5_12 , __snake_case : Dict=64 , __snake_case : Optional[int]=20_48 , __snake_case : Tuple=6 , __snake_case : Any=None , __snake_case : Optional[int]=8 , __snake_case : str=32 , __snake_case : Union[str, Any]=1_28 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=1e-6 , __snake_case : int=1.0 , __snake_case : Optional[int]="relu" , __snake_case : Any=True , __snake_case : List[str]=True , __snake_case : Union[str, Any]=0 , __snake_case : Dict=1 , **__snake_case : Optional[int] , ):
a : Optional[int] = vocab_size
a : Dict = d_model
a : Union[str, Any] = d_kv
a : Dict = d_ff
a : Tuple = num_layers
a : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a : int = num_heads
a : str = relative_attention_num_buckets
a : List[Any] = relative_attention_max_distance
a : int = dropout_rate
a : Tuple = layer_norm_epsilon
a : str = initializer_factor
a : List[Any] = feed_forward_proj
a : Union[str, Any] = use_cache
a : List[str] = self.feed_forward_proj.split('-' )
a : int = act_info[-1]
a : Union[str, Any] = act_info[0] == 'gated'
if len(__snake_case ) > 1 and act_info[0] != "gated" or len(__snake_case ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
a : Optional[Any] = 'gelu_new'
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , **__snake_case , )
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : Optional[int] ):
a : Dict = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
a : Dict = 'past_encoder_sequence + sequence'
a : Dict = {0: 'batch'}
a : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
a : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction='inputs' )
return common_inputs
@property
def lowercase_ ( self : List[Any] ):
return 13 | 297 | 0 |
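# Hedged sketch (mirrors the parsing logic above, not the transformers API):
# `feed_forward_proj` is split on '-' so that 'gated-gelu' selects the gelu
# activation with gating enabled, while a plain 'relu' disables gating.
def parse_feed_forward_proj(value):
    act_info = value.split('-')
    return act_info[-1], act_info[0] == 'gated'

assert parse_feed_forward_proj('gated-gelu') == ('gelu', True)
assert parse_feed_forward_proj('relu') == ('relu', False)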
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
class _lowercase ( lowerCamelCase__):
"""simple docstring"""
A__ = "timm_backbone"
def __init__( self : Optional[Any] , __lowerCamelCase : Any=None , __lowerCamelCase : Dict=3 , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=None , **__lowerCamelCase : Optional[int] , ):
'''simple docstring'''
super().__init__(**__snake_case )
lowerCamelCase__ : Optional[Any] = backbone
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Any = features_only
lowerCamelCase__ : List[Any] = use_pretrained_backbone
lowerCamelCase__ : Union[str, Any] = True
lowerCamelCase__ : Tuple = out_indices if out_indices is not None else (-1,)
| 184 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase__ ( _A , _A ):
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 297 | 0 |
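# Hedged numeric check (not part of the original sample): the resonant frequency
# of an LC circuit is f = 1 / (2 * pi * sqrt(L * C)). For L = 10 mH and
# C = 100 nF this comes out to roughly 5.03 kHz.
from math import pi, sqrt

frequency = 1 / (2 * pi * sqrt(10e-3 * 100e-9))
assert abs(frequency - 5032.9) < 1.0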
from collections import namedtuple
lowerCamelCase__ : Dict = namedtuple('from_to', 'from_ to')
lowerCamelCase__ : Optional[int] = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def UpperCAmelCase_ ( value : float , from_type : str , to_type : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ', '.join(_A ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ', '.join(_A ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod() | 225 |
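# Hedged worked example (illustration only): the conversion above normalises the
# source unit to cubic metres via `.from_` and then scales into the target unit
# via `.to`. Converting 4 litres to gallons:
#   4 * 0.001 (litre.from_) * 264.172 (gallon.to) ~= 1.0567 gallons
litres_to_gallons = 4 * 0.001 * 264.172
assert abs(litres_to_gallons - 1.0567) < 1e-3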
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
lowerCAmelCase: Any = {'vocab_file': 'vocab.txt'}
lowerCAmelCase: List[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase: str = {
'openbmb/cpm-ant-10b': 1_0_2_4,
}
def lowerCamelCase__ ( _A ):
a : Union[str, Any] = collections.OrderedDict()
with open(_A , 'r' , encoding='utf-8' ) as reader:
a : int = reader.readlines()
for index, token in enumerate(_A ):
a : int = token.rstrip('\n' )
a : List[Any] = index
return vocab
class a__( lowerCamelCase__ ):
def __init__( self : Dict , __snake_case : Any , __snake_case : Dict="<unk>" , __snake_case : str=2_00 ):
a : List[Any] = vocab
a : Any = unk_token
a : List[str] = max_input_chars_per_word
def lowercase_ ( self : Optional[int] , __snake_case : Union[str, Any] ):
a : Optional[Any] = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
a : Any = 0
a : Optional[Any] = []
while start < len(__snake_case ):
a : Optional[int] = len(__snake_case )
a : str = None
while start < end:
a : Optional[Any] = ''.join(chars[start:end] )
if substr in self.vocab:
a : List[str] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
a : List[str] = end
return sub_tokens
class a__( lowerCamelCase__ ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = False
def __init__( self : Any , __snake_case : str , __snake_case : Tuple="<d>" , __snake_case : List[str]="</d>" , __snake_case : Dict="<s>" , __snake_case : List[Any]="</s>" , __snake_case : int="<pad>" , __snake_case : Any="<unk>" , __snake_case : List[str]="</n>" , __snake_case : int="</_>" , __snake_case : Optional[Any]="left" , **__snake_case : Dict , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
a : Union[str, Any] = bod_token
a : Any = eod_token
a : List[str] = load_vocab(__snake_case )
a : Optional[int] = self.encoder[space_token]
a : str = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
a : Tuple = {v: k for k, v in self.encoder.items()}
a : List[str] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowercase_ ( self : Optional[int] ):
return self.encoder[self.bod_token]
@property
def lowercase_ ( self : Dict ):
return self.encoder[self.eod_token]
@property
def lowercase_ ( self : Any ):
return self.encoder["\n"]
@property
def lowercase_ ( self : Tuple ):
return len(self.encoder )
def lowercase_ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Union[str, Any] , __snake_case : List[str] ):
a : List[str] = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[Any] , **__snake_case : Optional[Any] ):
a : Optional[int] = [i for i in token_ids if i >= 0]
a : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : int ):
return token in self.encoder
def lowercase_ ( self : int , __snake_case : List[str] ):
return "".join(__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Union[str, Any] ):
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : Tuple , __snake_case : List[str] ):
return self.decoder.get(__snake_case , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] = None ):
if os.path.isdir(__snake_case ):
a : Optional[int] = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
a : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
a : Any = 0
if " " in self.encoder:
a : Union[str, Any] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
a : Tuple = self.encoder['\n']
del self.encoder["\n"]
a : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
a : List[Any] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def lowercase_ ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case )) | 297 | 0 |
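# Hedged sketch of the greedy longest-match-first loop used by the
# WordpieceTokenizer above: repeatedly take the longest vocabulary entry that
# prefixes the remaining characters, emitting the unknown token when nothing
# matches. Simplified variant for illustration, without the max-length guard.
def greedy_wordpiece(word, vocab, unk='<unk>'):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:  # no vocabulary entry matched at this position
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

assert greedy_wordpiece('unhappy', {'un', 'happy', 'hap', 'py'}) == ['un', 'happy']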
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 101 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class a__( unittest.TestCase ):
@slow
def lowercase_ ( self : List[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[int] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Any = TFAutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForCausalLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[int] ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : List[str] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : Tuple = AutoModelForMaskedLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : int ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case )
a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case )
a , a : str = AutoModelForSeqaSeqLM.from_pretrained(
__snake_case , output_loading_info=__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Tuple = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowercase_ ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
a : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowercase_ ( self : Tuple ):
a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowercase_ ( self : Any ):
a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) | 297 | 0 |
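# Hedged sketch of the cross-framework loading pattern the tests above exercise:
# `from_pt=True` converts a PyTorch checkpoint to TensorFlow on the fly and
# `from_tf=True` goes the other way. Left commented out because it downloads
# weights and needs both torch and tensorflow installed.
#
# from transformers import AutoModel, TFAutoModel
#
# tf_model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)
# pt_model = AutoModel.from_pretrained('bert-base-uncased', from_tf=True)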
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class _lowerCamelCase :
def __init__(self , __a , __a=13 , __a=7 , __a=False , __a=True , __a=False , __a=False , __a=19 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Any:
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def snake_case_ (self ) -> Any:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def snake_case_ (self , __a , __a , __a , __a , __a , __a ) -> Dict:
UpperCamelCase = EsmForProteinFolding(config=__snake_case ).float()
model.to(__snake_case )
model.eval()
UpperCamelCase = model(__snake_case , attention_mask=__snake_case )
UpperCamelCase = model(__snake_case )
UpperCamelCase = model(__snake_case )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def snake_case_ (self ) -> int:
UpperCamelCase = self.prepare_config_and_inputs()
(
UpperCamelCase
) = config_and_inputs
UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
UpperCAmelCase_ = False
UpperCAmelCase_ = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCAmelCase_ = ()
UpperCAmelCase_ = {} if is_torch_available() else {}
UpperCAmelCase_ = False
def snake_case_ (self ) -> str:
UpperCamelCase = EsmFoldModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def snake_case_ (self ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip("Does not support attention outputs" )
def snake_case_ (self ) -> int:
pass
@unittest.skip
def snake_case_ (self ) -> Dict:
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case_ (self ) -> Optional[Any]:
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case_ (self ) -> List[str]:
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def snake_case_ (self ) -> List[str]:
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ (self ) -> Any:
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ (self ) -> Optional[Any]:
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ (self ) -> Optional[Any]:
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ (self ) -> Tuple:
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case_ (self ) -> Optional[int]:
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def snake_case_ (self ) -> Any:
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def snake_case_ (self ) -> int:
pass
@unittest.skip("ESMFold only has one output format." )
def snake_case_ (self ) -> str:
pass
@unittest.skip("This test doesn\'t work for ESMFold and doesn\'t test core functionality" )
def snake_case_ (self ) -> Tuple:
pass
@unittest.skip("ESMFold does not support input chunking." )
def snake_case_ (self ) -> int:
pass
@unittest.skip("ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments." )
def snake_case_ (self ) -> List[Any]:
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def snake_case_ (self ) -> Optional[int]:
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def snake_case_ (self ) -> Dict:
pass
@unittest.skip("ESMFold doesn\'t support torchscript compilation." )
def snake_case_ (self ) -> Any:
pass
@unittest.skip("ESMFold doesn\'t support data parallel." )
def snake_case_ (self ) -> Tuple:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case_ (self ) -> Optional[int]:
pass
@require_torch
class _lowerCamelCase ( lowerCamelCase__ ):
@slow
def snake_case_ (self ) -> str:
UpperCamelCase = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
UpperCamelCase = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCamelCase = model(__snake_case )['positions']
UpperCamelCase = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
| 153 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """roberta"""
def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
a : List[str] = vocab_size
a : str = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = hidden_act
a : str = intermediate_size
a : Union[str, Any] = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Optional[int] = type_vocab_size
a : str = initializer_range
a : List[Any] = layer_norm_eps
a : Optional[int] = position_embedding_type
a : Dict = use_cache
a : Any = classifier_dropout
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : int ):
if self.task == "multiple-choice":
a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 297 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ='▁'
__snake_case ={'vocab_file': 'spiece.model'}
__snake_case ={
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
__snake_case ={
'google/reformer-crime-and-punishment': 524_288,
}
class UpperCAmelCase_ ( lowerCamelCase__ ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : Tuple=[] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Optional[int] , ) -> Optional[int]:
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__snake_case , unk_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
@property
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self : List[Any] , UpperCAmelCase__ : Optional[int] ) -> List[Any]:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> List[Any]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int ) -> int:
return self.sp_model.piece_to_id(__snake_case )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tuple ) -> List[str]:
if index < self.sp_model.get_piece_size():
lowerCAmelCase = self.sp_model.IdToPiece(__snake_case )
return token
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[int] ) -> str:
lowerCAmelCase = []
lowerCAmelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
lowerCAmelCase = []
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 4 |
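The tokenizer above is a thin wrapper around the `sentencepiece` package. A minimal standalone sketch of the same encode/decode round-trip, assuming a trained SentencePiece model exists at the (hypothetical) path below:

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # hypothetical local model file

pieces = sp.encode("Crime and punishment", out_type=str)  # list of subword strings
ids = [sp.piece_to_id(p) for p in pieces]                 # same piece -> id mapping the tokenizer uses
print(pieces, ids)
print(sp.decode(pieces))                                  # back to "Crime and punishment"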
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root can only be bracketed in [a, b] if f(a) and f(b) have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6)) | 297 | 0 |
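A quick numeric check of the bisection routine above: the positive root of 10 - x^2 is sqrt(10) ≈ 3.1623, and the loop stops once the bracket is narrower than 0.01, so the returned midpoint should agree to about two decimals:

import math

root = bisection(0, 6)
assert abs(root - math.sqrt(10)) < 0.01  # final bracket width bounds the error
print(root, math.sqrt(10))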
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
        'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = 'trajectory_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 73 |
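A small usage sketch for the config above. The `attribute_map` lets model-agnostic code read `hidden_size` while the stored field stays `n_embd`; this mirrors how `PretrainedConfig` resolves mapped attribute names (a sketch of the expected behavior, not output from a real run):

config = TrajectoryTransformerConfig(n_embd=256, n_layer=6)
print(config.n_embd)             # 256
print(config.hidden_size)        # also 256, resolved through attribute_map
print(config.num_hidden_layers)  # 6, alias for n_layer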
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip('Does not support attention outputs')
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('ESMFold does not support passing input embeds!')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_integration(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip('ESMFold does not support head pruning.')
    def test_headmasking(self):
        pass

    @unittest.skip('ESMFold does not output hidden states in the normal way.')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('ESMfold does not output hidden states in the normal way.')
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip('ESMFold only has one output format.')
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip('ESMFold does not support input chunking.')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
    def test_initialization(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip('ESMFold doesn\'t support torchscript compilation.')
    def test_torchscript_simple(self):
        pass

    @unittest.skip('ESMFold doesn\'t support data parallel.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4)) | 297 | 0 |
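The `ids_tensor` / `random_attention_mask` helpers used above are small wrappers over `torch.randint`. A minimal equivalent (my own sketch, not the actual transformers helpers):

import torch

def ids_tensor(shape, vocab_size):
    # random token ids in [0, vocab_size)
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1  # guarantee at least one attended token per row
    return mask

print(ids_tensor([2, 7], vocab_size=19).shape)  # torch.Size([2, 7])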
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 296 |
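The `replicate`/`shard` pair used throughout these tests splits work across local devices: `replicate` copies the parameter tree to every device, while `shard` reshapes the leading batch axis to `(device_count, batch_per_device, ...)` so each device gets one slice under `pmap`. A standalone sketch of the data path:

import jax
import numpy as np
from flax.training.common_utils import shard

n = jax.device_count()
batch = np.arange(n * 2 * 3).reshape(n * 2, 3)  # leading dim must divide evenly by n
sharded = shard(batch)
print(sharded.shape)  # (n, 2, 3): one slice per device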
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a__( nn.Module ):
def __init__( self : Any , __snake_case : int = 16 , __snake_case : int = 88 , __snake_case : Optional[int] = None , __snake_case : int = 1 , __snake_case : float = 0.0 , __snake_case : int = 32 , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , ):
super().__init__()
a : Optional[int] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__snake_case , attention_head_dim=__snake_case , in_channels=__snake_case , num_layers=__snake_case , dropout=__snake_case , norm_num_groups=__snake_case , cross_attention_dim=__snake_case , attention_bias=__snake_case , sample_size=__snake_case , num_vector_embeds=__snake_case , activation_fn=__snake_case , num_embeds_ada_norm=__snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a : Union[str, Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a : Tuple = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states) | 297 | 0 |
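The blending step at the end of `forward` is a plain convex combination of the two transformers' residuals followed by a skip connection. In isolation, with random tensors standing in for the encoded states:

import torch

mix_ratio = 0.5
encoded_0, encoded_1 = torch.randn(1, 4, 8), torch.randn(1, 4, 8)
input_states = torch.randn(1, 4, 8)

output = encoded_0 * mix_ratio + encoded_1 * (1 - mix_ratio) + input_states
print(output.shape)  # torch.Size([1, 4, 8])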
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 255 |
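The segment-id logic in `create_token_type_ids_from_sequences` above reduces to list arithmetic over the `[CLS] A [SEP] B [SEP]` layout; the same computation without the tokenizer class (the cls/sep ids below are just placeholders):

cls, sep = [101], [102]  # typical BERT special-token ids, here only placeholders
a, b = [7, 8, 9], [11, 12]

token_type_ids = len(cls + a + sep) * [0] + len(b + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]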
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 | 0 |
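A stripped-down version of the lazy-import pattern this init file relies on, using only the stdlib (`_LazyModule` itself does more bookkeeping around `sys.modules` and `__getattr__`; this is just the core idea):

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def lazy_get(name):
    # import the submodule only when one of its symbols is first requested
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(lazy_get("sqrt")(2.0))  # math is imported only at this point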
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Get the main domain name (e.g. 'github.com') from a URL."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get the full network location (sub domain) from a URL."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return all email addresses for the URL's domain found on the page's links."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails))) | 210 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 297 | 0 |
"""simple docstring"""
def lowercase ( __snake_case : Tuple ):
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_A , _A ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_A ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
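The `bin(a).count('1')` trick above counts set bits via the string form; Kernighan's loop does the same without building a string (equivalent result for non-negative ints):

def count_setbits_kernighan(a: int) -> int:
    count = 0
    while a:
        a &= a - 1  # clears the lowest set bit each iteration
        count += 1
    return count

assert count_setbits_kernighan(25) == bin(25).count("1") == 3  # 25 == 0b11001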
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return encoded_inputs | 297 | 0 |
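The `-1` padding convention in `_pad` above, shown on plain lists: `0` already means "local attention" in the global attention mask, so padded positions need a third value:

input_ids = [5, 6, 7, 0, 0]        # already padded to length 5
global_attention_mask = [1, 0, 0]  # only computed for the real tokens

difference = len(input_ids) - len(global_attention_mask)
global_attention_mask = global_attention_mask + [-1] * difference  # right padding
print(global_attention_mask)  # [1, 0, 0, -1, -1]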
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.'
    )
    config_args.add_argument(
        '--config_file', type=str, default=None, help='Path to the config file to use for accelerate.'
    )
    config_args.add_argument(
        '--tpu_name', default=None, help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.'
    )
    config_args.add_argument(
        '--tpu_zone', default=None, help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.'
    )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha', action='store_true', help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.'
    )
    pod_args.add_argument(
        '--command_file', default=None, help='The path to the file containing the commands to run on the pod on startup.'
    )
    pod_args.add_argument(
        '--command', action='append', nargs='+', help='A command to run on the pod. Can be passed multiple times.'
    )
    pod_args.add_argument(
        '--install_accelerate', action='store_true', help='Whether to install accelerate on the pod. Defaults to False.'
    )
    pod_args.add_argument(
        '--accelerate_version', default='latest', help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.'
    )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.'
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')

    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = '; '.join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 28 |
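The launcher builds one `gcloud ... ssh --command '...'` invocation by joining the per-pod shell commands with '; '. The assembly step in isolation (the TPU name and zone below are hypothetical placeholders):

commands = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
joined = "; ".join(commands)  # a single shell command string for --command

cmd = ["gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu", "--zone", "us-central1-a", "--command", joined, "--worker", "all"]
print(" ".join(cmd))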
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image() | 297 | 0 |
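The stretch above is essentially histogram equalization: build the CDF of pixel intensities and use it as a lookup table. A compact numpy version of the same mapping (a standalone sketch, independent of the class above):

import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
hist = np.bincount(img.ravel(), minlength=256)
cdf = hist.cumsum() / hist.sum()
lut = np.round(255 * cdf).astype(np.uint8)  # intensity -> stretched intensity
equalized = lut[img]                        # apply the mapping via fancy indexing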
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = "\""
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
def lowerCAmelCase ( self : Any , __lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowerCamelCase__ : Optional[Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__snake_case ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__snake_case ) ):
lowerCamelCase__ : List[Any] = pd.read_csv(__snake_case , iterator=__snake_case , dtype=__snake_case , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__snake_case ):
lowerCamelCase__ : Union[str, Any] = pa.Table.from_pandas(__snake_case )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__snake_case )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(__snake_case )}: {e}" )
raise
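
# Sketch of the chunked-read pattern used by _generate_tables above: pandas
# yields DataFrame chunks, each of which becomes an Arrow table. The path and
# chunk size below are illustrative, not part of the builder.
import pandas as pd
import pyarrow as pa


def iter_arrow_tables(path: str, chunksize: int = 1_000):
    reader = pd.read_csv(path, iterator=True, chunksize=chunksize)
    for batch_idx, df in enumerate(reader):
        yield batch_idx, pa.Table.from_pandas(df)
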
| 184 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
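
# Small usage sketch of the dotted-key lookup implemented by get_value(); the
# config dict here is a made-up example, not a complete DeepSpeed config.
_demo_cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
assert _demo_cfg.get_value("zero_optimization.stage") == 3
assert _demo_cfg.is_zero3() and _demo_cfg.is_offload()
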
class DeepSpeedEngineWrapper:
    """Internal wrapper around a deepspeed engine, used during training."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a deepspeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a deepspeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Dummy optimizer that only records hyperparameters for the real DeepSpeed optimizer."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Dummy scheduler that only records hyperparameters for the real DeepSpeed scheduler."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 297 | 0 |
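
# Hedged usage sketch: the Dummy* classes above only record hyperparameters so
# Accelerate can hand them to DeepSpeed, which then builds the real optimizer
# and scheduler from its own config. The model here is a stand-in.
import torch.nn as nn

_model = nn.Linear(4, 4)
_optimizer = DummyOptim(_model.parameters(), lr=1e-3)
_scheduler = DummyScheduler(_optimizer, total_num_steps=1_000, warmup_num_steps=100)
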
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 225 |
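
# The tests above rely on a config round-trip: a scheduler saved with
# save_config() and restored via from_pretrained() should step identically.
# Minimal sketch using the public diffusers API:
import tempfile

from diffusers import DPMSolverSinglestepScheduler

_sched = DPMSolverSinglestepScheduler(num_train_timesteps=1_000)
with tempfile.TemporaryDirectory() as _tmpdir:
    _sched.save_config(_tmpdir)
    _restored = DPMSolverSinglestepScheduler.from_pretrained(_tmpdir)
assert _restored.config.num_train_timesteps == 1_000
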
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single complete example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 297 | 0 |
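
# Hedged sketch of the launcher pattern the test class above uses: write a
# default accelerate config once, then run each example script through
# `accelerate launch --config_file ...`. The paths below are illustrative.
import tempfile
from pathlib import Path

from accelerate.utils import write_basic_config

_config_path = Path(tempfile.mkdtemp()) / "default_config.yml"
write_basic_config(save_location=str(_config_path))
_cmd = ["accelerate", "launch", "--config_file", str(_config_path), "examples/nlp_example.py"]
# subprocess.run(_cmd, check=True)  # left commented: requires the accelerate examples checkout
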
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
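
# Brief usage sketch for the config above; the values mirror the "t5-small"
# defaults and are illustrative.
_t5_cfg = T5Config(d_model=512, num_layers=6, num_heads=8)
assert _t5_cfg.hidden_size == _t5_cfg.d_model  # resolved through attribute_map
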
| 101 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 297 | 0 |
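
# Reference sketch of the unbiased pass@k estimator that the code_eval metric
# implements (Chen et al., 2021, "Evaluating Large Language Models Trained on
# Code"): n is the number of samples, c the number that pass the unit tests.
import numpy as np


def estimate_pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
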
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_convert_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_convert_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 153 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two vectors (torch or numpy)."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
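
# Quick numeric check of slerp: the halfway point between orthogonal unit
# vectors stays on the unit sphere (linear interpolation would not).
_v0 = np.array([1.0, 0.0])
_v1 = np.array([0.0, 1.0])
assert np.isclose(np.linalg.norm(slerp(0.5, _v0, _v1)), 1.0)
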
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __snake_case : CLIPFeatureExtractor , __snake_case : List[str]=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , ):
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , clip_model=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , feature_extractor=__snake_case , coca_model=__snake_case , coca_tokenizer=__snake_case , coca_transform=__snake_case , )
a : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , __snake_case )
else feature_extractor.size['shortest_edge']
)
a : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __snake_case )
set_requires_grad(self.clip_model , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def lowercase_ ( self : Union[str, Any] ):
self.enable_attention_slicing(__snake_case )
def lowercase_ ( self : Optional[Any] ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : Tuple ):
set_requires_grad(self.vae , __snake_case )
def lowercase_ ( self : int ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
set_requires_grad(self.unet , __snake_case )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : str , __snake_case : Optional[int] ):
# get the original timestep using init_timestep
a : Optional[Any] = min(int(num_inference_steps * strength ) , __snake_case )
a : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
a : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase_ ( self : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[Any]=None ):
if not isinstance(__snake_case , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(__snake_case )}""" )
a : Optional[Any] = image.to(device=__snake_case , dtype=__snake_case )
if isinstance(__snake_case , __snake_case ):
a : Optional[int] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__snake_case )
]
a : Optional[Any] = torch.cat(__snake_case , dim=0 )
else:
a : Union[str, Any] = self.vae.encode(__snake_case ).latent_dist.sample(__snake_case )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a : List[str] = 0.18215 * init_latents
a : str = init_latents.repeat_interleave(__snake_case , dim=0 )
a : Dict = randn_tensor(init_latents.shape , generator=__snake_case , device=__snake_case , dtype=__snake_case )
# get latents
a : Dict = self.scheduler.add_noise(__snake_case , __snake_case , __snake_case )
a : int = init_latents
return latents
def lowercase_ ( self : List[str] , __snake_case : Dict ):
a : List[Any] = self.coca_transform(__snake_case ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
a : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
a : Union[str, Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def lowercase_ ( self : Tuple , __snake_case : Any , __snake_case : Optional[Any] ):
a : List[Any] = self.feature_extractor.preprocess(__snake_case )
a : Optional[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
a : int = self.clip_model.get_image_features(__snake_case )
a : str = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : Tuple = image_embeddings_clip.repeat_interleave(__snake_case , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowercase_ ( self : Tuple , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : List[Any] , ):
a : Optional[Any] = latents.detach().requires_grad_()
a : List[Any] = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : Any = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
a : int = self.scheduler.alphas_cumprod[timestep]
a : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
a : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
a : Tuple = torch.sqrt(__snake_case )
a : str = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __snake_case ):
a : List[Any] = self.scheduler.sigmas[index]
a : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a : Union[str, Any] = 1 / 0.18215 * sample
a : str = self.vae.decode(__snake_case ).sample
a : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
a : Tuple = transforms.Resize(self.feature_extractor_size )(__snake_case )
a : List[str] = self.normalize(__snake_case ).to(latents.dtype )
a : List[str] = self.clip_model.get_image_features(__snake_case )
a : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__snake_case )
a : int = spherical_dist_loss(__snake_case , __snake_case ).mean() * clip_guidance_scale
a : List[str] = -torch.autograd.grad(__snake_case , __snake_case )[0]
if isinstance(self.scheduler , __snake_case ):
a : List[Any] = latents.detach() + grads * (sigma**2)
a : Optional[int] = noise_pred_original
else:
a : List[Any] = noise_pred_original - torch.sqrt(__snake_case ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Union[torch.FloatTensor, PIL.Image.Image] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Optional[int] = 5_12 , __snake_case : Optional[int] = 5_12 , __snake_case : float = 0.6 , __snake_case : Optional[int] = 50 , __snake_case : Optional[float] = 7.5 , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[float] = 1_00 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , __snake_case : float = 0.8 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , ):
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(__snake_case )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__snake_case , torch.Generator ) and batch_size > 1:
a : Dict = [generator] + [None] * (batch_size - 1)
a : Any = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
a : List[str] = [x[0] for x in coca_is_none if x[1]]
a : List[str] = ', '.join(__snake_case )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : int = self.get_image_description(__snake_case )
if style_prompt is None:
if len(__snake_case ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
a : Union[str, Any] = self.get_image_description(__snake_case )
# get prompt text embeddings for content and style
a : Optional[Any] = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
a : Dict = self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=__snake_case , return_tensors='pt' , )
a : Dict = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
a : Any = slerp(__snake_case , __snake_case , __snake_case )
# duplicate text embeddings for each generation per prompt
a : Optional[Any] = text_embeddings.repeat_interleave(__snake_case , dim=0 )
# set timesteps
a : int = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
a : Any = {}
if accepts_offset:
a : Optional[Any] = 1
self.scheduler.set_timesteps(__snake_case , **__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
a , a : Tuple = self.get_timesteps(__snake_case , __snake_case , self.device )
a : Optional[int] = timesteps[:1].repeat(__snake_case )
# Preprocess image
a : Optional[Any] = preprocess(__snake_case , __snake_case , __snake_case )
a : List[Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : str = preprocess(__snake_case , __snake_case , __snake_case )
a : Union[str, Any] = self.prepare_latents(
__snake_case , __snake_case , __snake_case , text_embeddings.dtype , self.device , __snake_case )
a : Union[str, Any] = slerp(__snake_case , __snake_case , __snake_case )
if clip_guidance_scale > 0:
a : Dict = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : int = self.get_clip_image_embeddings(__snake_case , __snake_case )
a : List[str] = slerp(
__snake_case , __snake_case , __snake_case )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a : Any = content_text_input.input_ids.shape[-1]
a : List[Any] = self.tokenizer([''] , padding='max_length' , max_length=__snake_case , return_tensors='pt' )
a : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
a : Dict = uncond_embeddings.repeat_interleave(__snake_case , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
a : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
a : int = torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
a : Optional[int] = torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a : Optional[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a : Union[str, Any] = {}
if accepts_eta:
a : List[str] = eta
# check if the scheduler accepts generator
a : List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
a : Any = generator
with self.progress_bar(total=__snake_case ):
for i, t in enumerate(__snake_case ):
# expand the latents if we are doing classifier free guidance
a : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a : Dict = self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
a : List[Any] = self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
a , a : List[str] = noise_pred.chunk(2 )
a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
a : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
a , a : Union[str, Any] = self.cond_fn(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# compute the previous noisy sample x_t -> x_t-1
a : Any = self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
a : Tuple = 1 / 0.18215 * latents
a : Optional[int] = self.vae.decode(__snake_case ).sample
a : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
a : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
a : str = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
| 297 | 0 |
"""Emulation of the WWII German Enigma machine."""
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt/decrypt text; with identical settings the operation is its own inverse."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

            # else:
            #     pass
            #     Error could be also raised
            #     raise ValueError('Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 4 |
'''simple docstring'''
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be non-negative' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        # curvature density: the four density parameters must sum to one
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        # E(z)^2, the dimensionless expansion rate squared
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
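# A sketch of the relation implemented above, in standard notation (Omega_r, Omega_m,
# Omega_Lambda correspond to radiation_density, matter_density, dark_energy):
#     H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_Lambda)
#     with Omega_k = 1 - (Omega_r + Omega_m + Omega_Lambda)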
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 297 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : List[str] = tmp_path / 'file.csv'
__lowerCamelCase : List[Any] = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
with open(_A , 'w' ) as f:
f.write(_A )
return str(_A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
__lowerCamelCase : Optional[int] = tmp_path / 'malformed_file.csv'
__lowerCamelCase : str = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
with open(_A , 'w' ) as f:
f.write(_A )
return str(_A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : Tuple = tmp_path / 'csv_with_image.csv'
__lowerCamelCase : List[str] = textwrap.dedent(
        F"""\
        image
        {image_file}
        """ )
with open(_A , 'w' ) as f:
f.write(_A )
return str(_A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : int = tmp_path / 'csv_with_label.csv'
__lowerCamelCase : str = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
with open(_A , 'w' ) as f:
f.write(_A )
return str(_A )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : str = tmp_path / 'csv_with_int_list.csv'
__lowerCamelCase : List[Any] = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
with open(_A , 'w' ) as f:
f.write(_A )
return str(_A )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
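    # a CSV whose rows have inconsistent column counts must surface pyarrow's tokenizing error and log the failing file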
__lowerCamelCase : int = Csv()
__lowerCamelCase : Tuple = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_A , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(_A ) in record.message
for record in caplog.records )
@require_pil
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
with open(_A , encoding='utf-8' ) as f:
__lowerCamelCase : Optional[Any] = f.read().splitlines()[1]
__lowerCamelCase : Dict = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
__lowerCamelCase : Dict = csv._generate_tables([[csv_file_with_image]] )
__lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
__lowerCamelCase : List[Any] = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
with open(_A , encoding='utf-8' ) as f:
__lowerCamelCase : List[Any] = f.read().splitlines()[1:]
__lowerCamelCase : Dict = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
__lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
__lowerCamelCase : Tuple = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
__lowerCamelCase : Dict = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    __lowerCamelCase : List[str] = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
__lowerCamelCase : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] )
__lowerCamelCase : Dict = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
__lowerCamelCase : Union[str, Any] = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 73 |
'''simple docstring'''
from __future__ import annotations
import math
class a__:
def __init__( self : List[str] , __snake_case : int ):
a : str = size
# approximate the overall size of segment tree with given value
a : Optional[int] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
a : Any = [0 for i in range(0 , 4 * size )]
a : Dict = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowercase_ ( self : int , __snake_case : int ):
return idx * 2
def lowercase_ ( self : Dict , __snake_case : int ):
return idx * 2 + 1
def lowercase_ ( self : Dict , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ):
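        # build: fills the max segment tree for a[left_element-1 .. right_element-1] rooted at node idx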
if left_element == right_element:
a : Tuple = a[left_element - 1]
else:
a : Tuple = (left_element + right_element) // 2
self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case )
self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case )
a : Union[str, Any] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ):
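        # update: lazily assigns val to every position in a..b (1-indexed) and keeps the range maxima consistent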
if self.flag[idx] is True:
a : int = self.lazy[idx]
a : Union[str, Any] = False
if left_element != right_element:
a : Dict = self.lazy[idx]
a : int = self.lazy[idx]
a : Tuple = True
a : Optional[Any] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
a : int = val
if left_element != right_element:
a : int = val
a : Dict = val
a : List[str] = True
a : List[str] = True
return True
a : Tuple = (left_element + right_element) // 2
self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case )
a : Optional[int] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
return True
def lowercase_ ( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ):
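        # query: returns the maximum over a..b (1-indexed), pushing any pending lazy assignment down first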
if self.flag[idx] is True:
a : str = self.lazy[idx]
a : Optional[Any] = False
if left_element != right_element:
a : Dict = self.lazy[idx]
a : Union[str, Any] = self.lazy[idx]
a : Dict = True
a : int = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
a : Dict = (left_element + right_element) // 2
a : Optional[int] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case )
a : Union[str, Any] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case )
return max(__snake_case , __snake_case )
def __str__( self : Any ):
return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
lowerCAmelCase: Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
lowerCAmelCase: int = 1_5
lowerCAmelCase: Optional[int] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt) | 297 | 0 |
def __lowercase ( arr ) -> int:
    '''Return the minimum difference between the sums of a two-way partition of arr.'''
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True  # the empty subset always sums to zero
    for i in range(1 , s + 1 ):
        dp[0][i] = False  # with no elements, no positive sum is reachable
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # either skip the i-th element ...
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                # ... or include it
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # the smallest difference comes from the largest reachable sum <= s/2
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
| 296 |
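A quick sanity check for the partition routine above (a sketch; the expected values assume the usual minimum subset-sum-difference semantics):
if __name__ == "__main__":
    # [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the difference is 1
    print(__lowercase([1, 6, 11, 5] ))  # 1
    # [5, 5] splits evenly, so the difference is 0
    print(__lowercase([5, 5] ))  # 0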
'''simple docstring'''
def add ( first , second ):
    # iterative bitwise addition: loop until no carry remains
    while second != 0:
        carry = first & second  # bits that generate a carry
        first ^= second  # partial sum without the carry
        second = carry << 1  # shift the carry into the next position
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
first = int(input('Enter the first number: ').strip())
second = int(input('Enter the second number: ').strip())
print(F"{add(first, second) = }") | 297 | 0 |