'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
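# A minimal usage sketch (illustrative; the checkpoint names are public ControlNet
# weights, but any pair works). `save_pretrained` writes the nets side by side as
# `controlnet` and `controlnet_1`, which `from_pretrained` above rediscovers:
#
#     nets = [
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
#     ]
#     multi = MultiControlNetModel(nets)
#     multi.save_pretrained("./mydirectory/controlnet")
#     restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")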
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
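# Usage note (illustrative): with the `_LazyModule` registration above, an import
# such as
#
#     from transformers.models.speech_to_text import Speech2TextConfig
#
# only resolves the `configuration_speech_to_text` submodule on first attribute
# access, which keeps the top-level package import cheap.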
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
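# A minimal usage sketch (illustrative feature spec and path):
#
#     features = datasets.Features(
#         {"text": datasets.Value("string"), "score": datasets.Value("int32")}
#     )
#     dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
#     assert len(dataset) == 10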
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
"""simple docstring"""


def solution(n: int = 2_000_000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
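# A quick sanity check for the sieve above (illustrative, not part of the
# original file): the primes below 10 are 2, 3, 5 and 7, so
#
#     assert solution(10) == 17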
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
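# A short usage sketch for the automaton above (illustrative; `Automaton` is the
# class name restored here, and the expected indices were worked out by hand):
#
#     auto = Automaton(["what", "hat", "ver", "er"])
#     print(auto.search_in("whatever, err ... , wherever"))
#     # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}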
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel


import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Numerically solve y' = f(x, y) on [x0, x_end] with step h using the
    classic fourth-order Runge-Kutta method; returns the array of y values."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
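# A short usage sketch (illustrative, using the restored name `runge_kutta`):
# integrating y' = y from x = 0 to x = 1 with y(0) = 1 should approach e.
#
#     ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(ys[-1])  # ~2.7183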
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))


from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    """simple docstring"""

    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def UpperCamelCase_ ( __a ) -> Optional[int]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A__ ( UpperCamelCase_ ):
"""simple docstring"""
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[str] ):
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
a__ : Dict = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def UpperCamelCase_ ( __a ) -> Optional[int]:
import torch
def bert_cos_score_idf(__a , __a , *__a , **__a ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
a__ : Optional[int] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def UpperCamelCase_ ( __a ) -> str:
def load_from_checkpoint(__a ):
class A__ :
"""simple docstring"""
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ):
assert len(lowerCamelCase__ ) == 2
a__ : List[Any] = [0.19, 0.92]
return scores, sum(lowerCamelCase__ ) / len(lowerCamelCase__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
a__ : str = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
a__ : List[Any] = load_from_checkpoint
yield
def UpperCamelCase_ ( ) -> Any:
a__ : int = load_metric(os.path.join("metrics" , "seqeval" ) )
a__ : int = '''ERROR'''
a__ : Dict = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(snake_case_ , match=re.escape(snake_case_ ) ):
metric.compute(predictions=[] , references=[] , scheme=snake_case_ )
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)
        controlnet = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32))
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
            randn_tensor((1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device)),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 64 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
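# --- Illustrative usage sketch (editor's addition, not part of the original file).
# Assuming the config class above is importable as `ViTMAEConfig`, it can be
# instantiated with overrides like any other `PretrainedConfig`:
#
#     config = ViTMAEConfig(mask_ratio=0.5, image_size=192)
#     print(config.mask_ratio, config.num_hidden_layers)  # 0.5 12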
| 176 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.0001 , beta_end: float = 0.02 , beta_schedule: str = "linear" , trained_betas: Optional[jnp.ndarray] = None , variance_type: str = "fixed_small" , clip_sample: bool = True , prediction_type: str = "epsilon" , dtype: jnp.dtype = jnp.float32 , ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step( self , state: DDPMSchedulerState , model_output: jnp.ndarray , timestep: int , sample: jnp.ndarray , key: Optional[jax.random.KeyArray] = None , return_dict: bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise( self , state: DDPMSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity( self , state: DDPMSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray , ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
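# --- Illustrative denoising-loop sketch (editor's addition, not part of the
# original file). `model_apply_fn`, `params`, and `sample` are placeholders for
# a diffusion model and its inputs; only the scheduler calls mirror the API above.
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
#     for t in state.timesteps:
#         model_output = model_apply_fn(params, sample, t)  # hypothetical UNet call
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)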
| 64 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=snake_case_ ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=snake_case_ ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=snake_case_ )
return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
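# --- Illustrative invocation (editor's addition). The launcher above is meant to
# be run from the command line; the training-script path below is a placeholder:
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5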
| 607 | def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i-1
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
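# --- Editor's note: quick sanity check for the recurrence above.
# The first six Catalan numbers are 1, 1, 2, 5, 14, 42, so:
#     assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]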
| 64 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 448 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize: bool = True , size_divisor: int = 32 , resample=PILImageResampling.BILINEAR , do_rescale: bool = True , **kwargs , ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess( self , images , do_resize=None , do_rescale=None , size_divisor=None , resample=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
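# --- Illustrative example (editor's addition): with size_divisor=32, a 513x767
# image is resized down to the nearest multiples of 32, i.e. 512x736, because
# new_h = 513 // 32 * 32 = 512 and new_w = 767 // 32 * 32 = 736.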
| 390 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
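# --- Minimal demonstration of the QKV split used in load_checkpoint above
# (editor's addition). A fused projection of depth 3*d is split into three
# d-sized chunks; per the comment in the function, the fused weight is laid
# out as K, V, Q rather than Q, K, V:
#
#     import torch
#     fused = torch.arange(18).reshape(9, 2)       # depth = 9, so d = 3
#     k, v, q = torch.split(fused, 9 // 3, dim=0)  # three (3, 2) tensors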
| 64 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt2': 1_024,
'gpt2-medium': 1_024,
'gpt2-large': 1_024,
'gpt2-xl': 1_024,
'distilgpt2': 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
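# --- Illustrative usage (editor's addition, not part of the original file).
# `add_prefix_space=True` makes the pre-tokenizer treat the first word like a
# mid-sentence word, which the asserts above require for pretokenized input:
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     ids = tok(["Hello", "world"], is_split_into_words=True)["input_ids"]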
| 324 | def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
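# --- Worked example (editor's addition): for water at roughly 20 °C, with
# density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa,
#     speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
# gives sqrt(2.15e9 / 998) ≈ 1467 m/s, close to the measured ~1480 m/s.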
| 64 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
"""simple docstring"""
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
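# --- Editor's note: the condition in solution() selects members of amicable
# pairs. For example sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so both 220 and 284 are counted. For limit 10_000 this is Project Euler
# problem 21, whose published answer is 31626.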
| 650 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
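# --- Illustrative usage sketch (editor's addition, not part of the original
# file). Aligning the template with a dataset's features swaps the generic
# ClassLabel in `label_schema` for the dataset's concrete one:
#
#     from datasets import ClassLabel, Features, Value
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     template = TextClassification(text_column="text", label_column="labels")
#     aligned = template.align_with_features(features)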
| 64 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase__ : Optional[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 8 | import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 64 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 615 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 64 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \nlength of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }") | 86 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np", )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
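# --- Minimal demonstration of the -100 label masking above (editor's addition).
# Positions where the tokenizer's attention mask is 0 (padding) are set to -100,
# which PyTorch's loss functions ignore when computing the CTC/cross-entropy loss:
#
#     import torch
#     input_ids = torch.tensor([[5, 6, 0], [7, 0, 0]])
#     attention_mask = torch.tensor([[1, 1, 0], [1, 0, 0]])
#     labels = input_ids.masked_fill(attention_mask.ne(1), -100)
#     # tensor([[   5,    6, -100],
#     #         [   7, -100, -100]])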
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def UpperCamelCase_ ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a__ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a__ : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
a__ : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f''' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , snake_case_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
a__ : Optional[int] = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
a__ : str = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
a__ : Optional[int] = f'''[{''.join(data_args.chars_to_ignore )}]'''
def remove_special_characters(__a ):
a__ : Any = re.sub(snake_case_ , "" , batch["sentence"] ).lower() + ''' '''
return batch
a__ : int = train_dataset.map(snake_case_ , remove_columns=["sentence"] )
a__ : List[str] = eval_dataset.map(snake_case_ , remove_columns=["sentence"] )
def extract_all_chars(__a ):
a__ : int = ''' '''.join(batch["text"] )
a__ : List[Any] = list(set(snake_case_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
a__ : Tuple = train_dataset.map(
snake_case_ , batched=snake_case_ , batch_size=-1 , keep_in_memory=snake_case_ , remove_columns=train_dataset.column_names , )
a__ : int = train_dataset.map(
snake_case_ , batched=snake_case_ , batch_size=-1 , keep_in_memory=snake_case_ , remove_columns=eval_dataset.column_names , )
a__ : Optional[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
a__ : Optional[Any] = {v: k for k, v in enumerate(snake_case_ )}
a__ : List[str] = vocab_dict[''' ''']
del vocab_dict[" "]
a__ : Optional[Any] = len(snake_case_ )
a__ : Dict = len(snake_case_ )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(snake_case_ , snake_case_ )
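# The block above builds a character-level vocabulary from the cleaned
# transcripts and dumps it to vocab.json. A compact sketch of the same idea on
# toy sentences; as in the tokenizer constructed below, the space character is
# remapped to the CTC word delimiter "|" and [UNK]/[PAD] entries are appended:
import json

sentences = ["hello world", "how are you"]        # toy transcripts
vocab = {ch: idx for idx, ch in enumerate(sorted(set("".join(sentences))))}
vocab["|"] = vocab.pop(" ")                       # space -> word delimiter
vocab["[UNK]"], vocab["[PAD]"] = len(vocab), len(vocab) + 1
print(json.dumps(vocab))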
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : str = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
a__ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ )
a__ : int = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
a__ : str = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
a__ : Any = min(len(snake_case_ ) , data_args.max_train_samples )
a__ : List[Any] = train_dataset.select(range(snake_case_ ) )
if data_args.max_val_samples is not None:
a__ : Dict = eval_dataset.select(range(data_args.max_val_samples ) )
a__ : Tuple = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__a ):
a__ : Any = torchaudio.load(batch["path"] )
a__ : Dict = resampler(snake_case_ ).squeeze().numpy()
a__ : Any = 16_000
a__ : str = batch['''text''']
return batch
a__ : Optional[int] = train_dataset.map(
snake_case_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
a__ : Tuple = eval_dataset.map(
snake_case_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__a ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
a__ : Tuple = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(snake_case_ )
return batch
a__ : Union[str, Any] = train_dataset.map(
snake_case_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , )
a__ : str = eval_dataset.map(
snake_case_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
a__ : str = datasets.load_metric("wer" )
def compute_metrics(__a ):
a__ : Optional[Any] = pred.predictions
a__ : Any = np.argmax(snake_case_ , axis=-1 )
a__ : Any = processor.tokenizer.pad_token_id
a__ : Optional[Any] = processor.batch_decode(snake_case_ )
# we do not want to group tokens when computing the metrics
a__ : Dict = processor.batch_decode(pred.label_ids , group_tokens=snake_case_ )
a__ : List[Any] = wer_metric.compute(predictions=snake_case_ , references=snake_case_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
a__ : List[Any] = DataCollatorCTCWithPadding(processor=snake_case_ , padding=snake_case_ )
# Initialize our Trainer
a__ : Optional[Any] = CTCTrainer(
model=snake_case_ , data_collator=snake_case_ , args=snake_case_ , compute_metrics=snake_case_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
a__ : Dict = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
a__ : Any = model_args.model_name_or_path
else:
a__ : Optional[Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
a__ : Union[str, Any] = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
a__ : List[str] = train_result.metrics
a__ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
)
a__ : Union[str, Any] = min(snake_case_ , len(snake_case_ ) )
trainer.log_metrics("train" , snake_case_ )
trainer.save_metrics("train" , snake_case_ )
trainer.save_state()
# Evaluation
a__ : Union[str, Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a__ : Optional[int] = trainer.evaluate()
a__ : Dict = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case_ )
a__ : Tuple = min(snake_case_ , len(snake_case_ ) )
trainer.log_metrics("eval" , snake_case_ )
trainer.save_metrics("eval" , snake_case_ )
return results
if __name__ == "__main__":
main()
| 37 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **lowerCAmelCase ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
logger.warning(
f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**lowerCAmelCase )
__a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
__a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
__a = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
elif is_torch_tpu_available():
SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
SCREAMING_SNAKE_CASE__: Any= 0
else:
SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase_ ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def UpperCamelCase_ ( self ) -> str:
return self.n_gpu > 0
| 64 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A : Tuple = logging.get_logger(__name__)
def __lowerCamelCase ( __a :List[Any] , __a :Dict , __a :List[Any] ) -> List[Any]:
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def __lowerCamelCase ( __a :np.ndarray , __a :Optional[str] , __a :Optional[str] ) -> str:
"""simple docstring"""
A__ = to_pil_image(snake_case_ )
A__ = pil_image.size
A__ = pytesseract.image_to_data(snake_case_ , lang=snake_case_ , output_type="""dict""" , config=snake_case_ )
A__ , A__ , A__ , A__ , A__ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
A__ = [idx for idx, word in enumerate(snake_case_ ) if not word.strip()]
A__ = [word for idx, word in enumerate(snake_case_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
A__ = []
for x, y, w, h in zip(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
A__ = [x, y, x + w, y + h]
actual_boxes.append(snake_case_ )
# finally, normalize the bounding boxes
A__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(snake_case_ , snake_case_ , snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
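# apply_tesseract above rescales pixel-space OCR boxes into the 0-1000
# coordinate grid that LayoutLM-style models expect; the normalization is plain
# proportional scaling. A standalone check with made-up numbers:
def normalize_box(box, width, height):
    # (left, top, right, bottom) in pixels -> integers in [0, 1000]
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# a 100x50 pixel box near the top-left of an 800x600 page
assert normalize_box((40, 30, 140, 80), 800, 600) == [50, 50, 175, 133]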
class A (UpperCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : Dict = ['''pixel_values''']
def __init__( self : Any , __lowerCAmelCase : List[Any] = True , __lowerCAmelCase : str = None , __lowerCAmelCase : Optional[int] = PILImageResampling.BILINEAR , __lowerCAmelCase : Any = True , __lowerCAmelCase : Optional[int] = 1 / 2_55 , __lowerCAmelCase : Any = True , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Any = True , __lowerCAmelCase : Dict = None , __lowerCAmelCase : int = "" , **__lowerCAmelCase : str , ) -> None:
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
A__ = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
A__ = get_size_dict(__lowerCAmelCase )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_value
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
A__ = apply_ocr
A__ = ocr_lang
A__ = tesseract_config
def a_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Dict = PILImageResampling.BILINEAR , __lowerCAmelCase : List[Any] = None , **__lowerCAmelCase : Any , ) -> np.ndarray:
"""simple docstring"""
A__ = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
A__ = (size['''height'''], size['''width'''])
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Tuple , ) -> np.ndarray:
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] = None , **__lowerCAmelCase : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = None , __lowerCAmelCase : int = None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : int = None , __lowerCAmelCase : str = None , __lowerCAmelCase : Union[str, Any] = None , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : int = None , __lowerCAmelCase : Dict = None , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : List[Any] = None , __lowerCAmelCase : Any = None , __lowerCAmelCase : Dict = ChannelDimension.FIRST , **__lowerCAmelCase : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(__lowerCAmelCase )
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = apply_ocr if apply_ocr is not None else self.apply_ocr
A__ = ocr_lang if ocr_lang is not None else self.ocr_lang
A__ = tesseract_config if tesseract_config is not None else self.tesseract_config
A__ = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__lowerCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
A__ = []
A__ = []
for image in images:
A__ = apply_tesseract(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
words_batch.append(__lowerCAmelCase )
boxes_batch.append(__lowerCAmelCase )
if do_resize:
A__ = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
A__ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
A__ = BatchFeature(data={"""pixel_values""": images} , tensor_type=__lowerCAmelCase )
if apply_ocr:
A__ = words_batch
A__ = boxes_batch
return data
| 176 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase_ ( self ) -> Tuple:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 64 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( UpperCamelCase_ ):
__UpperCAmelCase = (DPMSolverSinglestepScheduler,)
__UpperCAmelCase = (("""num_inference_steps""", 25),)
def _UpperCAmelCase ( self , **snake_case ) -> str:
"""simple docstring"""
lowercase : Any = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float("""inf""" ),
'''variance_type''': None,
}
config.update(**snake_case )
return config
def _UpperCAmelCase ( self , snake_case=0 , **snake_case ) -> Any:
"""simple docstring"""
lowercase : Union[str, Any] = dict(self.forward_default_kwargs )
lowercase : Tuple = kwargs.pop("""num_inference_steps""" , snake_case )
lowercase : Tuple = self.dummy_sample
lowercase : List[Any] = 0.1 * sample
lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase : str = self.get_scheduler_config(**snake_case )
lowercase : Dict = scheduler_class(**snake_case )
scheduler.set_timesteps(snake_case )
# copy over dummy past residuals
lowercase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case )
lowercase : List[str] = scheduler_class.from_pretrained(snake_case )
new_scheduler.set_timesteps(snake_case )
# copy over dummy past residuals
lowercase : int = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase , lowercase : Dict = sample, sample
for t in range(snake_case , time_step + scheduler.config.solver_order + 1 ):
lowercase : str = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
lowercase : str = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def _UpperCAmelCase ( self , snake_case=0 , **snake_case ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = dict(self.forward_default_kwargs )
lowercase : List[Any] = kwargs.pop("""num_inference_steps""" , snake_case )
lowercase : Tuple = self.dummy_sample
lowercase : Dict = 0.1 * sample
lowercase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase : Optional[Any] = self.get_scheduler_config()
lowercase : Tuple = scheduler_class(**snake_case )
scheduler.set_timesteps(snake_case )
# copy over dummy past residuals (must be after setting timesteps)
lowercase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case )
lowercase : List[Any] = scheduler_class.from_pretrained(snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case )
# copy over dummy past residual (must be after setting timesteps)
lowercase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase : Dict = scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
lowercase : str = new_scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , snake_case=None , **snake_case ) -> List[Any]:
"""simple docstring"""
if scheduler is None:
lowercase : Dict = self.scheduler_classes[0]
lowercase : Dict = self.get_scheduler_config(**snake_case )
lowercase : List[str] = scheduler_class(**snake_case )
lowercase : str = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config(**snake_case )
lowercase : Optional[Any] = scheduler_class(**snake_case )
lowercase : Tuple = 1_0
lowercase : Optional[Any] = self.dummy_model()
lowercase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowercase : str = model(snake_case , snake_case )
lowercase : Tuple = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
return sample
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowercase : Union[str, Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase : Tuple = 5_0
lowercase : List[Any] = self.dummy_model()
lowercase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowercase : Dict = model(snake_case , snake_case )
lowercase : str = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
lowercase : Union[str, Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.25_74 ) < 1E-3
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowercase : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowercase : int = self.full_loop(scheduler=snake_case )
lowercase : Optional[int] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
lowercase : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
lowercase : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase : Dict = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase : List[Any] = self.full_loop(scheduler=snake_case )
lowercase : int = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self.check_over_configs(thresholding=snake_case )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , algorithm_type="""dpmsolver++""" , solver_order=snake_case , solver_type=snake_case , )
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
lowercase : int = self.full_loop(
solver_order=snake_case , solver_type=snake_case , prediction_type=snake_case , algorithm_type=snake_case , )
assert not torch.isnan(snake_case ).any(), "Samples have nan numbers"
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
self.check_over_configs(lower_order_final=snake_case )
self.check_over_configs(lower_order_final=snake_case )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
self.check_over_configs(variance_type=snake_case )
self.check_over_configs(variance_type="""learned_range""" )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=snake_case , time_step=0 )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = self.full_loop()
lowercase : List[Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = self.full_loop(use_karras_sigmas=snake_case )
lowercase : Dict = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.22_48 ) < 1E-3
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : Any = self.full_loop(prediction_type="""v_prediction""" )
lowercase : Union[str, Any] = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.14_53 ) < 1E-3
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowercase : str = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=snake_case )
lowercase : Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_mean.item() - 0.06_49 ) < 1E-3
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
lowercase : int = self.scheduler_classes[0]
lowercase : int = self.get_scheduler_config(thresholding=snake_case , dynamic_thresholding_ratio=0 )
lowercase : Optional[int] = scheduler_class(**snake_case )
lowercase : str = 1_0
lowercase : Tuple = self.dummy_model()
lowercase : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Union[str, Any] = model(snake_case , snake_case )
lowercase : Union[str, Any] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
assert sample.dtype == torch.floataa
| 607 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
print('''Generating primitive root of p''' )
while True:
SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ )
if pow(snake_case_ , 2 , snake_case_ ) == 1:
continue
if pow(snake_case_ , snake_case_ , snake_case_ ) == 1:
continue
return g
def A__ ( snake_case_ : int ):
print('''Generating prime p...''' )
SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
return public_key, private_key
def A__ ( snake_case_ : str , snake_case_ : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('''\nWARNING:''' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , '''w''' ) as fo:
fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
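# The key generation above is ElGamal: a public tuple built from a prime p, a
# primitive root g, and g^d mod p, with the exponent d kept private. A tiny
# round-trip with deliberately insecure toy numbers showing why decryption
# recovers the message (pow(s, -1, p) needs Python 3.8+):
import random

p, g = 467, 2                          # toy prime and base
d = random.randrange(2, p - 1)         # private key
h = pow(g, d, p)                       # public component g^d mod p
m = 123                                # message, must be < p
k = random.randrange(2, p - 1)         # fresh ephemeral key per message
c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
s = pow(c1, d, p)                      # c1^d == g^(k*d) == h^k (mod p)
assert (c2 * pow(s, -1, p)) % p == m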
| 64 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
__a ="mvp"
__a =["past_key_values"]
__a ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , lowerCamelCase=5_0267 , lowerCamelCase=1024 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=12 , lowerCamelCase=4096 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase="gelu" , lowerCamelCase=1024 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=0.0 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase=True , lowerCamelCase=2 , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=100 , lowerCamelCase=800 , **lowerCamelCase , ) ->Dict:
'''simple docstring'''
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = classifier_dropout
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
__a = use_prompt
__a = prompt_length
__a = prompt_mid_dim
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , lowerCamelCase ):
__a = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'The config can simply be saved and uploaded again to be fixed.' ) | 448 | from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
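# The function above computes n! / (k! * (n - k)!) directly from factorials.
# Since Python 3.8 the standard library exposes the same quantity as math.comb;
# a quick cross-check on the values printed below:
import math
from math import factorial

for n, k in [(52, 5), (40, 4), (10, 3)]:
    assert factorial(n) // (factorial(k) * factorial(n - k)) == math.comb(n, k)
print(math.comb(52, 5))  # 2598960 five-card hands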
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase__ : Tuple = 3
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
print('''Generating primitive root of p''' )
while True:
UpperCAmelCase_ = random.randrange(3 , snake_case_ )
if pow(snake_case_ , 2 , snake_case_ ) == 1:
continue
if pow(snake_case_ , snake_case_ , snake_case_ ) == 1:
continue
return g
def __lowerCamelCase ( _UpperCamelCase : int ):
'''simple docstring'''
print('''Generating prime p...''' )
UpperCAmelCase_ = rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
UpperCAmelCase_ = primitive_root(snake_case_ ) # one primitive root on modulo p.
UpperCAmelCase_ = random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
UpperCAmelCase_ = cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
UpperCAmelCase_ = (key_size, e_a, e_a, p)
UpperCAmelCase_ = (key_size, d)
return public_key, private_key
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ):
'''simple docstring'''
if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
UpperCAmelCase_ = generate_key(snake_case_ )
print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(F"""{name}_pubkey.txt""" , '''w''' ) as fo:
fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(F"""Writing private key to file {name}_privkey.txt...""" )
with open(F"""{name}_privkey.txt""" , '''w''' ) as fo:
fo.write(F"""{private_key[0]},{private_key[1]}""" )
def __lowerCamelCase ( ):
'''simple docstring'''
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 390 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ):
if rng is None:
SCREAMING_SNAKE_CASE__: Tuple= global_rng
SCREAMING_SNAKE_CASE__: List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= parent
SCREAMING_SNAKE_CASE__: Dict= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
SCREAMING_SNAKE_CASE__: Dict= max_seq_length
SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__: Dict= feature_size
SCREAMING_SNAKE_CASE__: str= padding_value
SCREAMING_SNAKE_CASE__: Dict= sampling_rate
SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
SCREAMING_SNAKE_CASE__: str= do_normalize
def UpperCamelCase_ ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__: int= [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = ASTFeatureExtractor
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: str= torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def UpperCamelCase ( _A, _A=False ):
"""simple docstring"""
__magic_name__ : Optional[int] = OmegaConf.load(snake_case_ )
if display:
print(yaml.dump(OmegaConf.to_container(snake_case_ ) ) )
return config
def UpperCamelCase ( _A, _A=None, _A=None ):
"""simple docstring"""
if conf_path is None:
__magic_name__ : int = '''./model_checkpoints/vqgan_only.yaml'''
__magic_name__ : Optional[Any] = load_config(snake_case_, display=snake_case_ )
__magic_name__ : Union[str, Any] = VQModel(**config.model.params )
if ckpt_path is None:
__magic_name__ : Any = '''./model_checkpoints/vqgan_only.pt'''
__magic_name__ : str = torch.load(snake_case_, map_location=snake_case_ )
if ".ckpt" in ckpt_path:
__magic_name__ : Optional[Any] = sd['''state_dict''']
model.load_state_dict(snake_case_, strict=snake_case_ )
model.to(snake_case_ )
del sd
return model
def UpperCamelCase ( _A, _A ):
"""simple docstring"""
__magic_name__ : List[Any] = model.encode(snake_case_ )
print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
__magic_name__ : Any = model.decode(snake_case_ )
return xrec
def UpperCamelCase ( _A, _A=False ):
"""simple docstring"""
__magic_name__ , __magic_name__ : Optional[int] = string.rsplit(""".""", 1 )
if reload:
__magic_name__ : int = importlib.import_module(snake_case_ )
importlib.reload(snake_case_ )
return getattr(importlib.import_module(snake_case_, package=snake_case_ ), cls )
def UpperCamelCase ( _A ):
"""simple docstring"""
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""", {} ) )
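# The two helpers above resolve a dotted "target" string to a class and call it
# with the config's params. A minimal standalone sketch of that pattern using
# only a stdlib target (the names below are illustrative, not this module's API):
import importlib

def build_from_target(config: dict):
    module_path, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    return cls(**config.get("params", {}))

obj = build_from_target({"target": "collections.OrderedDict",
                         "params": {"a": 1, "b": 2}})
assert list(obj.items()) == [("a", 1), ("b", 2)]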
def UpperCamelCase ( _A, _A, _A=True, _A=True ):
"""simple docstring"""
__magic_name__ : Tuple = instantiate_from_config(snake_case_ )
if sd is not None:
model.load_state_dict(snake_case_ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def UpperCamelCase ( _A, _A, _A, _A ):
"""simple docstring"""
if ckpt:
__magic_name__ : Dict = torch.load(snake_case_, map_location="""cpu""" )
__magic_name__ : Dict = pl_sd['''global_step''']
print(f'loaded model from global step {global_step}.' )
else:
__magic_name__ : Optional[int] = {'''state_dict''': None}
__magic_name__ : List[Any] = None
__magic_name__ : List[str] = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=snake_case_, eval_mode=snake_case_ )['''model''']
return model, global_step
| 324 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
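# The _LazyModule wiring above defers the heavy framework imports until an
# attribute is first accessed. The same effect can be had in plain Python via
# PEP 562's module-level __getattr__; a minimal sketch of what a package
# __init__.py could contain (the submodule names here are hypothetical):
import importlib

_LAZY_SUBMODULES = {"json_tools": ".json_tools", "xml_tools": ".xml_tools"}

def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(_LAZY_SUBMODULES[name], __name__)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")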
| 64 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __A : Optional[int] , __A : Optional[int] , __A : List[Any] , __A : int ) -> Optional[int]:
"""simple docstring"""
lowercase : Dict =FunnelConfig.from_json_file(snake_case_ )
print(F'Building PyTorch model from configuration: {config}' )
lowercase : Optional[int] =FunnelBaseModel(snake_case_ ) if base_model else FunnelModel(snake_case_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 94 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if not len(snake_case_ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
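# A sketch of a typical invocation; the script name and model directory are
# hypothetical, for illustration only:
#
#   python generate_images.py -m ./stable-diffusion-v1-5-int8 -c "robotic cat with wings" -n 4 -s 42 -ci 0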
| 64 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
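# For example, floats_list((2, 3)) returns two nested lists of three
# pseudo-random floats each, drawn from `rng` (or `global_rng`) and scaled by `scale`.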
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 650 | from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
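# A quick usage sketch, matching the doctest shipped with the upstream
# TheAlgorithms version of this class:
#
#   >>> A = Automaton(["what", "hat", "ver", "er"])
#   >>> A.search_in("whatever, err ... , wherever")
#   {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}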
| 64 | 0 |
"""Check whether three 3-D points are collinear (the AB x AC cross product is the zero vector)."""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
| 8 | import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
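# A minimal usage sketch: integrating y' = y from x = 0 to x = 1 with step 0.1
# should approximate e at the final node.
#
#   >>> y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   >>> round(float(y[-1]), 3)
#   2.718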
| 64 | 0 |
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
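# Round-trip sanity check (the table above only covers the listed symbols):
#
#   >>> encrypt("SOS")
#   '... --- ...'
#   >>> decrypt(encrypt("SOS"))
#   'SOS'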
| 615 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 | 0 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
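# Worked example: res(2, 10) = 10 * log10(2) ≈ 3.0103 and res(3, 7) ≈ 3.3398,
# so 3**7 (= 2187) is the larger power, as 2**10 = 1024.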
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__a :Dict = 'Enter the base and the power separated by a comma: '
__a :Tuple = map(int, input(prompt).split(','))
__a :Any = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
__a :Tuple = res(xa, ya)
__a :Union[str, Any] = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal') | 86 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
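# For example, before any updates min_segment_tree.query(0, 2) folds min over
# test_array[0:3] == [1, 10, -2] and returns -2 (query bounds are inclusive).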
def test_all_segments() -> None:
    for i in range(len(test_array)):
        for j in range(i, len(test_array)):
            min_range = reduce(min, test_array[i : j + 1])
            max_range = reduce(max, test_array[i : j + 1])
            sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
            assert min_range == min_segment_tree.query(i, j)
            assert max_range == max_segment_tree.query(i, j)
            assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 37 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png").resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png").resize((512, 512))
        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy")
        assert np.abs(expected_image - image).max() < 9e-2
| 64 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
"""simple docstring"""
A__ = BioGptModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
"""simple docstring"""
A__ = BioGptForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
"""simple docstring"""
A__ = BioGptModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# create attention mask
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCAmelCase )
A__ = self.seq_length // 2
A__ = 0
# first forward pass
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ = ids_tensor((1,) , __lowerCAmelCase ).item() + 1
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ = random_other_next_tokens
# append to next input_ids and attn_mask
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__lowerCAmelCase )] , dim=1 , )
# get two different outputs
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )['''last_hidden_state''']
A__ = model(__lowerCAmelCase , past_key_values=__lowerCAmelCase , attention_mask=__lowerCAmelCase )['''last_hidden_state''']
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
"""simple docstring"""
A__ = BioGptModel(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCAmelCase )
# first forward pass
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
A__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )['''last_hidden_state''']
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase )[
'''last_hidden_state'''
]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
"""simple docstring"""
A__ = BioGptForCausalLM(__lowerCAmelCase )
model.to(__lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
"""simple docstring"""
A__ = BioGptModel(__lowerCAmelCase )
A__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
"""simple docstring"""
A__ = self.num_labels
A__ = BioGptForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_biogpt(self):
"""simple docstring"""
A__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
A__ = model(__lowerCAmelCase )[0]
A__ = 4_23_84
A__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
    def test_biogpt_generation(self):
"""simple docstring"""
A__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(__lowerCAmelCase )
torch.manual_seed(0 )
A__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(__lowerCAmelCase )
A__ = model.generate(
**__lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__lowerCAmelCase , )
A__ = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCAmelCase )
A__ = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 176 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
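# Illustrative NumPy sketch (an addition, not part of the scheduler above) of the
# DDPM posterior that the step computes: formulas (6), (7) and (15) of
# https://arxiv.org/pdf/2006.11239.pdf. The linear beta schedule and the step
# index are assumptions chosen for demonstration only.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)  # assumed toy schedule
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

def ddpm_posterior(sample, model_output, t):
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    beta_prod_t = 1.0 - alpha_prod_t
    beta_prod_t_prev = 1.0 - alpha_prod_t_prev
    # "predicted x_0", formula (15), epsilon parameterization
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # coefficients of the posterior mean, formula (7)
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
    current_sample_coeff = alphas[t]**0.5 * beta_prod_t_prev / beta_prod_t
    mean = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # "fixed_small" posterior variance, formula (7)
    variance = beta_prod_t_prev / beta_prod_t * betas[t]
    return mean, variance

print(ddpm_posterior(np.zeros(4), np.ones(4), t=500))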
| 64 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform ( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be an integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""" )
    qr = QuantumRegister(number_of_qubits ,"""qr""" )
    cr = ClassicalRegister(number_of_qubits ,"""cr""" )
    quantum_circuit = QuantumCircuit(qr ,cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) ,j ,counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k ,number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr ,cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit ,backend ,shots=10000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
        F'Total count for quantum fourier transform state is: \n {quantum_fourier_transform(3)}'
)
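# A hedged sanity check for the circuit above: QFT applied to |000> yields a
# uniform superposition, so with 10000 shots each of the 2**3 = 8 outcomes
# should land near 10000 / 8 = 1250 counts. The tolerance below is an
# arbitrary statistical margin chosen for this sketch.
if __name__ == "__main__":
    counts = quantum_fourier_transform(3)
    assert len(counts) == 8
    for state, n in counts.items():
        assert abs(n - 1250) < 300, (state, n)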
| 607 | def catalan_numbers ( upper_limit : int ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
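# Cross-check sketch (assumes Python 3.8+ for math.comb): the DP table above
# should agree with the closed form C(n) = C(2n, n) / (n + 1).
from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

assert catalan_numbers(10) == [catalan_closed_form(i) for i in range(11)]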
| 64 | 0 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __SCREAMING_SNAKE_CASE :
@property
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
return self.get_dummy_input()
@property
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""" )
def __UpperCamelCase ( self , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , ) ->str:
'''simple docstring'''
__a = 4
__a = 32
__a = (32, 32)
__a = torch.manual_seed(0 )
__a = torch.device(lowerCamelCase )
__a = (batch_size, num_channels) + sizes
__a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase )
__a = {'''hidden_states''': hidden_states}
if include_temb:
__a = 128
__a = randn_tensor((batch_size, temb_channels) , generator=lowerCamelCase , device=lowerCamelCase )
if include_res_hidden_states_tuple:
__a = torch.manual_seed(1 )
__a = (randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=lowerCamelCase ),)
if include_encoder_hidden_states:
__a = floats_tensor((batch_size, 32, 32) ).to(lowerCamelCase )
if include_skip_sample:
__a = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCamelCase , device=lowerCamelCase )
return dummy_input
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
__a = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
__a = self.dummy_input
return init_dict, inputs_dict
def __UpperCamelCase ( self , lowerCamelCase ) ->List[str]:
'''simple docstring'''
__a = self.prepare_init_args_and_inputs_for_common()
__a = self.block_class(**lowerCamelCase )
unet_block.to(lowerCamelCase )
unet_block.eval()
with torch.no_grad():
__a = unet_block(**lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = output[0]
self.assertEqual(output.shape , self.output_shape )
__a = output[0, -1, -3:, -3:]
__a = torch.tensor(lowerCamelCase ).to(lowerCamelCase )
assert torch_all_close(output_slice.flatten() , lowerCamelCase , atol=5e-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.prepare_init_args_and_inputs_for_common()
__a = self.block_class(**lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__a = model(**lowerCamelCase )
if isinstance(lowerCamelCase , lowerCamelCase ):
__a = output[0]
__a = torch.device(lowerCamelCase )
__a = randn_tensor(output.shape , device=lowerCamelCase )
__a = torch.nn.functional.mse_loss(lowerCamelCase , lowerCamelCase )
loss.backward() | 448 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    '''simple docstring'''
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
return 2.0 * image - 1.0
class lowerCamelCase ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , ) ->List[Any]:
super().__init__()
self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : Any , UpperCAmelCase__ : str = None , UpperCAmelCase__ : List[Any] = 1 , UpperCAmelCase__ : Optional[Any] = 100 , UpperCAmelCase__ : Any = 0.0 , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Optional[int] = "pil" , UpperCAmelCase__ : Optional[int] = True , ) ->Union[Tuple, ImagePipelineOutput]:
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
UpperCAmelCase_ = 1
elif isinstance(UpperCAmelCase__ , torch.Tensor ):
UpperCAmelCase_ = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}""" )
if isinstance(UpperCAmelCase__ , PIL.Image.Image ):
UpperCAmelCase_ = preprocess(UpperCAmelCase__ )
UpperCAmelCase_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase_ = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase_ = next(self.unet.parameters() ).dtype
UpperCAmelCase_ = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ )
UpperCAmelCase_ = image.to(device=self.device , dtype=UpperCAmelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device )
UpperCAmelCase_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for t in self.progress_bar(UpperCAmelCase__ ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase_ = torch.cat([latents, image] , dim=1 )
UpperCAmelCase_ = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
# predict the noise residual
UpperCAmelCase_ = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase_ = self.vqvae.decode(UpperCAmelCase__ ).sample
UpperCAmelCase_ = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 )
UpperCAmelCase_ = image / 2 + 0.5
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
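# Sketch of the keyword-detection trick used above for `eta`: inspect the
# callee's signature and forward an optional kwarg only when it is accepted.
# The function names here are illustrative.
import inspect

def step_with_eta(x, eta=0.0):
    return x + eta

def step_plain(x):
    return x

for step in (step_with_eta, step_plain):
    extra_kwargs = {}
    if "eta" in set(inspect.signature(step).parameters.keys()):
        extra_kwargs["eta"] = 0.5
    print(step.__name__, step(1.0, **extra_kwargs))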
| 390 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__: List[str]= [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: str= {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__: int= sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE__: List[Any]= q
SCREAMING_SNAKE_CASE__: Any= k
SCREAMING_SNAKE_CASE__: Optional[Any]= v
del sd[key]
return sd
@torch.no_grad()
def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple=None ):
SCREAMING_SNAKE_CASE__: List[str]= load_checkpoint(snake_case_ )
if config is not None:
SCREAMING_SNAKE_CASE__: Any= OPTConfig.from_pretrained(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= OPTConfig()
SCREAMING_SNAKE_CASE__: Union[str, Any]= OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowercase_ : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
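# Minimal torch sketch of the fused-QKV split performed in the loop above;
# the toy hidden size is an assumption, and the K, V, Q ordering mirrors the
# metaseq-specific detail noted in the comment there.
import torch

hidden = 8  # assumed toy hidden size
fused = torch.randn(3 * hidden, hidden)  # stands in for a .qkv_proj. weight

depth = fused.shape[0]
assert depth % 3 == 0
k, v, q = torch.split(fused, depth // 3, dim=0)  # K, V, Q, in that order
print(k.shape, v.shape, q.shape)  # each torch.Size([8, 8])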
| 64 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
__magic_name__: Tuple = logging.getLogger(__name__)
@dataclass
class snake_case__ :
lowercase__ : Union[str, Any] = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__ : Any = field(
default=UpperCamelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowercase__ : Any = field(
default=UpperCamelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowercase__ : Tuple = field(
default=UpperCamelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowercase__ : Tuple = field(
default=UpperCamelCase_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowercase__ : Union[str, Any] = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowercase__ : int = field(
default=UpperCamelCase_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class snake_case__ :
lowercase__ : Dict = field(default=UpperCamelCase_ , metadata={'''help''': '''The input training data file (a text file).'''} )
lowercase__ : List[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
lowercase__ : List[Any] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowercase__ : Dict = field(
default=UpperCamelCase_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowercase__ : str = field(
default=UpperCamelCase_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowercase__ : Union[str, Any] = field(
default=UpperCamelCase_ , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
lowercase__ : Dict = field(
default=UpperCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowercase__ : Any = field(
default=UpperCamelCase_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __magic_name__ ( self ) -> List[Any]:
if self.train_file is not None:
__magic_name__ : Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__magic_name__ : Union[str, Any] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class snake_case__ :
lowercase__ : Optional[int] = 42
lowercase__ : Any = True
lowercase__ : Tuple = None
lowercase__ : str = None
def __call__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Tuple = '''label''' if '''label''' in features[0].keys() else '''labels'''
__magic_name__ : Optional[Any] = [feature.pop(lowerCAmelCase__ ) for feature in features]
__magic_name__ : Dict = len(lowerCAmelCase__ )
__magic_name__ : int = len(features[0]["""input_ids"""] )
__magic_name__ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__ )] for feature in features
]
__magic_name__ : List[Any] = list(chain(*lowerCAmelCase__ ) )
__magic_name__ : Any = self.tokenizer.pad(
lowerCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
__magic_name__ : Union[str, Any] = {k: v.view(lowerCAmelCase__ , lowerCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
        __magic_name__ : List[str] = torch.tensor(lowerCAmelCase__ , dtype=torch.int64 )
return batch
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""", snake_case_, snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__magic_name__ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
datasets.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__magic_name__ : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__magic_name__ : Union[str, Any] = {}
if data_args.train_file is not None:
__magic_name__ : List[Any] = data_args.train_file
if data_args.validation_file is not None:
__magic_name__ : Dict = data_args.validation_file
__magic_name__ : Tuple = data_args.train_file.split(""".""" )[-1]
__magic_name__ : int = load_dataset(
snake_case_, data_files=snake_case_, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
else:
# Downloading and loading the swag dataset from the hub.
__magic_name__ : Optional[Any] = load_dataset(
"""swag""", """regular""", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
__magic_name__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
__magic_name__ : List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=snake_case_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__magic_name__ : Tuple = [f'ending{i}' for i in range(4 )]
__magic_name__ : List[str] = '''sent1'''
__magic_name__ : str = '''sent2'''
if data_args.max_seq_length is None:
__magic_name__ : str = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
__magic_name__ : List[str] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__magic_name__ : Any = min(data_args.max_seq_length, tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_A ):
__magic_name__ : Any = [[context] * 4 for context in examples[context_name]]
__magic_name__ : List[Any] = examples[question_header_name]
__magic_name__ : Union[str, Any] = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(snake_case_ )
]
# Flatten out
__magic_name__ : Dict = list(chain(*snake_case_ ) )
__magic_name__ : Union[str, Any] = list(chain(*snake_case_ ) )
# Tokenize
__magic_name__ : Tuple = tokenizer(
snake_case_, snake_case_, truncation=snake_case_, max_length=snake_case_, padding="""max_length""" if data_args.pad_to_max_length else False, )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(snake_case_ ), 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__magic_name__ : Optional[int] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
__magic_name__ : Optional[int] = min(len(snake_case_ ), data_args.max_train_samples )
__magic_name__ : int = train_dataset.select(range(snake_case_ ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
__magic_name__ : Any = train_dataset.map(
snake_case_, batched=snake_case_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__magic_name__ : List[str] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
__magic_name__ : int = min(len(snake_case_ ), data_args.max_eval_samples )
__magic_name__ : Optional[Any] = eval_dataset.select(range(snake_case_ ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
__magic_name__ : Dict = eval_dataset.map(
snake_case_, batched=snake_case_, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
__magic_name__ : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=snake_case_, pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(_A ):
__magic_name__ : Tuple = eval_predictions
__magic_name__ : Union[str, Any] = np.argmax(snake_case_, axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
__magic_name__ : str = Trainer(
model=snake_case_, args=snake_case_, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=snake_case_, data_collator=snake_case_, compute_metrics=snake_case_, )
# Training
if training_args.do_train:
__magic_name__ : Tuple = None
if training_args.resume_from_checkpoint is not None:
__magic_name__ : List[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__magic_name__ : Optional[int] = last_checkpoint
__magic_name__ : int = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__magic_name__ : Optional[Any] = train_result.metrics
__magic_name__ : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case_ )
)
__magic_name__ : Any = min(snake_case_, len(snake_case_ ) )
trainer.log_metrics("""train""", snake_case_ )
trainer.save_metrics("""train""", snake_case_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__magic_name__ : List[Any] = trainer.evaluate()
__magic_name__ : str = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case_ )
__magic_name__ : str = min(snake_case_, len(snake_case_ ) )
trainer.log_metrics("""eval""", snake_case_ )
trainer.save_metrics("""eval""", snake_case_ )
__magic_name__ : List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
def UpperCamelCase ( _A ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
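# Sketch of the flatten/un-flatten trick shared by preprocess_function and the
# collator above: multiple-choice rows are tokenized as (batch * num_choices)
# sequences, then viewed back as (batch, num_choices, seq_len).
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat = torch.arange(batch_size * num_choices * seq_len).reshape(-1, seq_len)

unflat = flat.view(batch_size, num_choices, -1)   # the collator's v.view(...)
assert unflat.shape == (batch_size, num_choices, seq_len)
assert torch.equal(unflat.view(-1, seq_len), flat)  # round-trips losslessly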
| 324 | def speed_of_sound_in_a_fluid ( density : float , bulk_modulus : float ) -> float:
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
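# Hedged usage example with textbook-ish constants (assumed here, not from the
# source): water has a bulk modulus near 2.2 GPa and density 1000 kg/m^3, so
# Newton-Laplace v = sqrt(K / rho) gives roughly 1483 m/s.
print(f"{speed_of_sound_in_a_fluid(density=1000.0, bulk_modulus=2.2e9):.0f} m/s")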
| 64 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput ( UpperCamelCase_ ):
"""simple docstring"""
UpperCamelCase_ = 42
class UpperCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , UpperCAmelCase : Dict = 6_5536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : Dict = "fourier" , UpperCAmelCase : Union[str, Any] = True , UpperCAmelCase : Any = False , UpperCAmelCase : str = 0.0 , UpperCAmelCase : Dict = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Dict = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Union[str, Any] = "UNetMidBlock1D" , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : List[str] = (32, 32, 64) , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 8 , UpperCAmelCase : Any = 1 , UpperCAmelCase : Optional[int] = False , ) -> str:
'''simple docstring'''
super().__init__()
lowercase : Union[str, Any] =sample_size
# time
if time_embedding_type == "fourier":
lowercase : List[Any] =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowercase : int =2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowercase : Union[str, Any] =Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowercase : List[str] =block_out_channels[0]
if use_timestep_embedding:
lowercase : Optional[int] =block_out_channels[0] * 4
lowercase : Tuple =TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowercase : Any =nn.ModuleList([] )
lowercase : int =None
lowercase : Optional[Any] =nn.ModuleList([] )
lowercase : List[str] =None
# down
lowercase : Optional[Any] =in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowercase : Dict =output_channel
lowercase : str =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowercase : Any =i == len(UpperCAmelCase ) - 1
lowercase : Tuple =get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowercase : Optional[int] =get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowercase : Any =list(reversed(UpperCAmelCase ) )
lowercase : List[str] =reversed_block_out_channels[0]
if out_block_type is None:
lowercase : str =out_channels
else:
lowercase : str =block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowercase : Any =output_channel
lowercase : Any =(
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowercase : Union[str, Any] =i == len(UpperCAmelCase ) - 1
lowercase : List[str] =get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowercase : List[str] =output_channel
# out
lowercase : Any =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowercase : int =get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
    def A__ ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] = True , ) -> Union[UNet1DOutput, Tuple]:
'''simple docstring'''
lowercase : Dict =timestep
if not torch.is_tensor(UpperCAmelCase ):
lowercase : Dict =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : Optional[int] =timesteps[None].to(sample.device )
lowercase : Dict =self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowercase : Any =self.time_mlp(UpperCAmelCase )
else:
lowercase : Optional[Any] =timestep_embed[..., None]
lowercase : Dict =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowercase : Tuple =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowercase : Optional[Any] =()
for downsample_block in self.down_blocks:
lowercase : Optional[Any] =downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowercase : List[Any] =self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowercase : Union[str, Any] =down_block_res_samples[-1:]
lowercase : Optional[Any] =down_block_res_samples[:-1]
lowercase : int =upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowercase : Any =self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
        return UNet1DOutput(sample=UpperCAmelCase )
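# Illustrative sketch of the residual bookkeeping in the forward pass above:
# down blocks append to a tuple, and each up block pops the newest entries.
# Plain integers stand in for tensors.
down_block_res_samples = ()
x = 0
for _ in range(3):           # down pass
    x = x + 1                # stand-in for downsample_block(...)
    down_block_res_samples += (x,)
for _ in range(3):           # up pass, newest residual first
    res_samples = down_block_res_samples[-1:]
    down_block_res_samples = down_block_res_samples[:-1]
    x = x + res_samples[0]   # stand-in for upsample_block(..., res_hidden_states_tuple=...)
print(x)  # 3 + (3 + 2 + 1) = 9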
| 94 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Any = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
"""simple docstring"""
def naive_pattern_search ( s : str , pattern : str )-> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC"""))
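# Hedged cross-check using the standard lookahead-regex trick, which reports
# overlapping occurrences just like the naive scan above.
import re

def regex_positions(s: str, pattern: str) -> list:
    return [m.start() for m in re.finditer(f"(?={re.escape(pattern)})", s)]

assert naive_pattern_search("AAAA", "AA") == regex_positions("AAAA", "AA") == [0, 1, 2]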
| 650 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class _lowerCamelCase ( UpperCamelCase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__a = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__a = Features({"text": Value("string" )} )
__a = Features({"labels": ClassLabel} )
__a = "text"
__a = "labels"
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= copy.deepcopy(self )
SCREAMING_SNAKE_CASE__: Tuple= self.label_schema.copy()
SCREAMING_SNAKE_CASE__: Union[str, Any]= features[self.label_column]
SCREAMING_SNAKE_CASE__: List[str]= label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
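# Hedged usage sketch with the datasets library: the template above requires
# the label column to be a ClassLabel and exposes a text/labels column
# mapping. The label names here are illustrative, not from the source.
from datasets import ClassLabel, Features, Value

features = Features({
    "text": Value("string"),
    "labels": ClassLabel(names=["negative", "positive"]),  # assumed names
})
assert isinstance(features["labels"], ClassLabel)
print({"text": "text", "labels": "labels"})  # the column_mapping shape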
| 64 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def _lowerCAmelCase ( __snake_case : Optional[int] , __snake_case : int , __snake_case : Union[str, Any] ) -> Optional[int]:
__A : Dict = UniSpeechSatForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ )
__A : str = downstream_dict['''projector.weight''']
__A : Union[str, Any] = downstream_dict['''projector.bias''']
__A : Tuple = downstream_dict['''model.post_net.linear.weight''']
__A : List[str] = downstream_dict['''model.post_net.linear.bias''']
return model
def _lowerCAmelCase ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : str ) -> str:
__A : Optional[int] = UniSpeechSatForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ )
__A : str = downstream_dict['''model.linear.weight''']
__A : List[str] = downstream_dict['''model.linear.bias''']
return model
def _lowerCAmelCase ( __snake_case : Any , __snake_case : Any , __snake_case : Dict ) -> Tuple:
__A : Optional[Any] = UniSpeechSatForXVector.from_pretrained(snake_case_ , config=snake_case_ )
__A : List[str] = downstream_dict['''connector.weight''']
__A : str = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__A : List[Any] = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
__A : Any = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
__A : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__A : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__A : List[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__A : Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__A : Optional[Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Dict ) -> Tuple:
__A : Optional[Any] = torch.load(snake_case_ , map_location='cpu' )
__A : Union[str, Any] = checkpoint['''Downstream''']
__A : Optional[int] = UniSpeechSatConfig.from_pretrained(snake_case_ )
    __A : Optional[Any] = Wav2Vec2FeatureExtractor.from_pretrained(
snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ )
__A : int = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__A : int = convert_classification(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('ForAudioFrameClassification' ):
__A : Tuple = convert_diarization(snake_case_ , snake_case_ , snake_case_ )
elif arch.endswith('ForXVector' ):
__A : Dict = convert_xvector(snake_case_ , snake_case_ , snake_case_ )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__A : Any = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case_ )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
lowercase__ : List[str] = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 8 | import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase_ ( self ) -> List[str]:
import diffusers
from diffusers.dependency_versions_table import deps
        SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion'''
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE__: int= '''invisible-watermark'''
assert backend in deps, f'{backend} is not in the deps table!'
| 64 | 0 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
lowercase_ : Any = "MCTCTFeatureExtractor"
lowercase_ : Tuple = "AutoTokenizer"
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
super().__init__(__lowerCamelCase , __lowerCamelCase )
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
def __call__( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Any ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowerCAmelCase__ = kwargs.pop('''raw_speech''' )
else:
lowerCAmelCase__ = kwargs.pop('''audio''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''sampling_rate''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''text''' , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowerCAmelCase__ = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
lowerCAmelCase__ = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCAmelCase__ = encodings['''input_ids''']
return inputs
def A__ ( self : Any , *__lowerCamelCase : str , **__lowerCamelCase : List[str] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def A__ ( self : Optional[Any] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : int ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''input_features''' , __lowerCamelCase )
lowerCAmelCase__ = kwargs.pop('''labels''' , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
lowerCAmelCase__ = args[0]
lowerCAmelCase__ = args[1:]
if input_features is not None:
lowerCAmelCase__ = self.feature_extractor.pad(__lowerCamelCase , *__lowerCamelCase , **__lowerCamelCase )
if labels is not None:
lowerCAmelCase__ = self.tokenizer.pad(__lowerCamelCase , **__lowerCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCAmelCase__ = labels['''input_ids''']
return input_features
def A__ ( self : Dict , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : int ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def A__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowerCAmelCase__ = True
lowerCAmelCase__ = self.tokenizer
yield
lowerCAmelCase__ = self.feature_extractor
lowerCAmelCase__ = False
| 615 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , )
assert hasattr(self , '''env''' )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE__: Optional[Any]= {
'''enabled''': True,
'''processes_per_host''': 8,
}
SCREAMING_SNAKE_CASE__: Dict= {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
# create estimator
SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE__: List[Any]= (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
| 64 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__a :Tuple = logging.get_logger(__name__)
class _a ( UpperCamelCase_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['input_values', 'attention_mask']
def __init__( self : List[Any] , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : List[str] = 16000 , UpperCAmelCase : Tuple = 0.0 , UpperCAmelCase : Optional[Any] = False , UpperCAmelCase : int = 80 , UpperCAmelCase : Optional[int] = 16 , UpperCAmelCase : int = 64 , UpperCAmelCase : Tuple = "hann_window" , UpperCAmelCase : Tuple = 1.0 , UpperCAmelCase : Union[str, Any] = 80 , UpperCAmelCase : List[str] = 7600 , UpperCAmelCase : Dict = 1E-10 , UpperCAmelCase : Optional[Any] = 2 , UpperCAmelCase : Tuple = True , **UpperCAmelCase : Tuple , ):
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
A_ = do_normalize
A_ = return_attention_mask
A_ = num_mel_bins
A_ = hop_length
A_ = win_length
A_ = win_function
A_ = frame_signal_scale
A_ = fmin
A_ = fmax
A_ = mel_floor
A_ = reduction_factor
A_ = win_length * sampling_rate // 1000
A_ = hop_length * sampling_rate // 1000
A_ = optimal_fft_length(self.sample_size )
A_ = (self.n_fft // 2) + 1
A_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCAmelCase )
A_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __A ( UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] = 0.0 ):
if attention_mask is not None:
            A_ = np.array(UpperCAmelCase , np.int32 )
A_ = []
for vector, length in zip(UpperCAmelCase , attention_mask.sum(-1 ) ):
A_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
A_ = padding_value
normed_input_values.append(UpperCAmelCase )
else:
A_ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __A ( self : Optional[Any] , UpperCAmelCase : Tuple , ):
A_ = spectrogram(
UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs):
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
return output | 86 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False  # lower ONNX Runtime GPU memory use
        return options
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
SCREAMING_SNAKE_CASE__: int= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
SCREAMING_SNAKE_CASE__: Any= pipe(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__: Any= output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 64 | 0 |
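A quick standalone illustration of the zero-mean, unit-variance normalization used by zero_mean_unit_var_norm above: statistics are computed only over the unpadded region indicated by the attention mask, and padded positions are reset to the padding value afterwards. This is a minimal sketch with made-up inputs, not the library API.

import numpy as np

def normalize_with_mask(vectors, attention_mask, padding_value=0.0):
    # vectors: list of 1-D float arrays, already padded to a common length
    # attention_mask: (batch, max_len) zeros/ones marking the real samples
    attention_mask = np.asarray(attention_mask, dtype=np.int32)
    normed = []
    for vector, length in zip(vectors, attention_mask.sum(-1)):
        # mean/variance over the real samples only
        normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
        normed_slice[length:] = padding_value  # keep the padding at padding_value
        normed.append(normed_slice)
    return normed

batch = [np.array([1.0, 2.0, 3.0, 0.0]), np.array([4.0, 5.0, 0.0, 0.0])]
mask = [[1, 1, 1, 0], [1, 1, 0, 0]]
print(normalize_with_mask(batch, mask)[0][:3].mean())  # ~0.0: the unpadded region is zero-mean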
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class A__ ( UpperCamelCase_ ):
"""simple docstring"""
_lowercase = ['pixel_values']
def __init__( self : List[str] , lowerCamelCase__ : List[str] = True , lowerCamelCase__ : List[Any] = None , lowerCamelCase__ : int = PILImageResampling.BILINEAR , lowerCamelCase__ : Tuple = True , lowerCamelCase__ : Any = 1 / 255 , lowerCamelCase__ : Any = True , lowerCamelCase__ : Union[str, Any] = None , lowerCamelCase__ : Dict = True , **lowerCamelCase__ : Any , ):
super().__init__(**lowerCamelCase__ )
a__ : Tuple = size if size is not None else {'''shortest_edge''': 224}
a__ : Any = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
a__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
a__ : Tuple = get_size_dict(lowerCamelCase__ , param_name="crop_size" )
a__ : List[str] = do_resize
a__ : List[Any] = size
a__ : Optional[int] = resample
a__ : Any = do_rescale
a__ : str = rescale_factor
a__ : str = do_center_crop
a__ : int = crop_size
a__ : List[str] = do_flip_channel_order
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str = PIL.Image.BILINEAR , lowerCamelCase__ : List[Any] = None , **lowerCamelCase__ : List[str] , ):
a__ : Any = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
a__ : Dict = get_resize_output_image_size(lowerCamelCase__ , size=size["shortest_edge"] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any = None , **lowerCamelCase__ : Any , ):
a__ : Optional[Any] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] = None , **lowerCamelCase__ : List[Any] , ):
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str = None ):
return flip_channel_order(lowerCamelCase__ , data_format=lowerCamelCase__ )
def _UpperCamelCase( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any = None , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Tuple = None , lowerCamelCase__ : Union[str, Any] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : List[str] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : str = None , lowerCamelCase__ : int = ChannelDimension.FIRST , **lowerCamelCase__ : List[str] , ):
a__ : int = do_resize if do_resize is not None else self.do_resize
a__ : List[str] = resample if resample is not None else self.resample
a__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
a__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ : Optional[Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
a__ : List[Any] = size if size is not None else self.size
a__ : int = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
a__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
a__ : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name="crop_size" )
a__ : Tuple = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
a__ : Dict = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
a__ : Any = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
a__ : Dict = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
a__ : Dict = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
a__ : Any = [self.flip_channel_order(image=lowerCamelCase__ ) for image in images]
a__ : int = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
a__ : Dict = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple = None ):
a__ : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(lowerCamelCase__ ):
a__ : Any = target_sizes.numpy()
a__ : Any = []
for idx in range(len(lowerCamelCase__ ) ):
a__ : str = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowerCamelCase__ )
a__ : List[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
a__ : List[Any] = logits.argmax(dim=1 )
a__ : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
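As the comment in the preprocess method above notes, the pretrained checkpoints expect BGR rather than RGB input, which is why the processor flips the channel order. A minimal numpy sketch of that flip for a channels-last image (the real flip_channel_order also handles channels-first layouts):

import numpy as np

rgb = np.zeros((2, 2, 3), dtype=np.uint8)
rgb[..., 0] = 255  # a pure red image in (H, W, C) layout

bgr = rgb[..., ::-1]  # reverse the channel axis: RGB -> BGR
assert bgr[0, 0].tolist() == [0, 0, 255]  # red now sits in the last slot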
| 37 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **lowerCAmelCase ) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
logger.warning(
                f'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**lowerCAmelCase )
__a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
__a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
__a = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
elif is_torch_tpu_available():
SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
SCREAMING_SNAKE_CASE__: Any= 0
else:
SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase_ ( self ) -> "torch.device":
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def UpperCamelCase_ ( self ) -> int:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def UpperCamelCase_ ( self ) -> str:
return self.n_gpu > 0
| 64 | 0 |
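The deprecation shim in the __init__ above strips the no_ prefix from each legacy flag and stores the negated value under the positive name. A tiny standalone sketch of the same pattern (the class and field names here are illustrative, not the benchmark class's real ones):

class Args:
    deprecated_args = ["no_inference", "no_cuda"]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # "no_cuda" -> "cuda"
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
        self.__dict__.setdefault("cuda", True)
        self.__dict__.setdefault("inference", True)

print(Args(no_cuda=True).cuda)  # False: the old negative flag was inverted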
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A :
'''simple docstring'''
__lowerCamelCase : Optional[Any] = None
def a_ ( self : str ) -> List[Any]:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
A__ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __lowerCAmelCase )
def a_ ( self : Dict ) -> Dict:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = os.path.join(__lowerCAmelCase , """feat_extract.json""" )
feat_extract_first.to_json_file(__lowerCAmelCase )
A__ = self.feature_extraction_class.from_json_file(__lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def a_ ( self : Any ) -> Dict:
"""simple docstring"""
A__ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = feat_extract_first.save_pretrained(__lowerCAmelCase )[0]
check_json_file_has_correct_format(__lowerCAmelCase )
A__ = self.feature_extraction_class.from_pretrained(__lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def a_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
A__ = self.feature_extraction_class()
self.assertIsNotNone(__lowerCAmelCase )
| 176 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase_ ( self ) -> Tuple:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 64 | 0 |
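Every shape assertion above reduces to (batch, channels, crop_height, crop_width) because the processor center-crops after resizing. A minimal numpy center crop for a channels-first array, just to make the geometry concrete:

import numpy as np

def center_crop(image, size):
    # image: (C, H, W); size: (crop_h, crop_w), assumed no larger than (H, W)
    _, h, w = image.shape
    crop_h, crop_w = size
    top, left = (h - crop_h) // 2, (w - crop_w) // 2
    return image[:, top : top + crop_h, left : left + crop_w]

img = np.arange(3 * 40 * 40).reshape(3, 40, 40)
print(center_crop(img, (30, 30)).shape)  # (3, 30, 30), matching crop_size above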
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
lowercase : List[Any] = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
lowercase : int = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(snake_case )
from datasets import load_dataset
lowercase : List[str] = load_dataset("""nielsr/rvlcdip-demo""" )
lowercase : Dict = dataset['''train'''][0]['''image'''].convert("""RGB""" )
lowercase : List[str] = image_processor(snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
lowercase : Optional[int] = model(**snake_case )
lowercase : Optional[Any] = outputs.logits
lowercase : Tuple = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , snake_case )
lowercase : Tuple = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=snake_case , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case , atol=1E-4 ) )
| 607 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def primitive_root(p_val: int) -> int:
    print('''Generating primitive root of p''')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print('''Generating prime p...''')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(F'{name}_pubkey.txt') or os.path.exists(F'{name}_privkey.txt'):
        print('''\nWARNING:''')
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...')
    with open(F'{name}_pubkey.txt', '''w''') as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(F'Writing private key to file {name}_privkey.txt...')
    with open(F'{name}_privkey.txt', '''w''') as fo:
        fo.write(F'{private_key[0]},{private_key[1]}')
def main() -> None:
    print('''Making key files...''')
    make_key_files('''elgamal''', 2_048)
    print('''Key files generation successful''')
if __name__ == "__main__":
main()
| 64 | 0 |
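For intuition about the keys produced above: ElGamal publishes (p, g, g^d mod p) and keeps the exponent d private. A toy round trip with deliberately small numbers (illustrative only; real keys use the large primes generated above):

import random

p, g = 467, 2                   # small prime and base, for illustration only
d = 127                         # private key
e_2 = pow(g, d, p)              # public component g^d mod p

m = 123                         # message
k = random.randrange(2, p - 1)  # per-message ephemeral key
c1, c2 = pow(g, k, p), (m * pow(e_2, k, p)) % p

# decrypt: m = c2 * (c1^d)^(-1) mod p   (pow(x, -1, p) needs Python 3.8+)
assert (c2 * pow(pow(c1, d, p), -1, p)) % p == m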
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
"""simple docstring"""
__a = BitConfig(
global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'], embedding_dynamic_padding=snake_case_, )
__a = ViTHybridConfig(backbone_config=snake_case_, image_size=384, num_labels=1000 )
__a = False
# load original model from timm
__a = timm.create_model(snake_case_, pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__a = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case_ )
__a = create_rename_keys(snake_case_, snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_, snake_case_, snake_case_ )
read_in_q_k_v(snake_case_, snake_case_, snake_case_ )
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = json.load(open(hf_hub_download(snake_case_, snake_case_, repo_type='dataset' ), 'r' ) )
__a = {int(snake_case_ ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
__a = ViTHybridModel(snake_case_ ).eval()
else:
__a = ViTHybridForImageClassification(snake_case_ ).eval()
model.load_state_dict(snake_case_ )
# create image processor
__a = create_transform(**resolve_data_config({}, model=snake_case_ ) )
__a = transform.transforms
__a = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
__a = ViTHybridImageProcessor(
do_resize=snake_case_, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=snake_case_, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=snake_case_, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
__a = prepare_img()
__a = transform(snake_case_ ).unsqueeze(0 )
__a = processor(snake_case_, return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_, snake_case_ )
# verify logits
with torch.no_grad():
__a = model(snake_case_ )
__a = outputs.logits
print('Predicted class:', logits.argmax(-1 ).item() )
if base_model:
__a = timm_model.forward_features(snake_case_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case_, outputs.pooler_output, atol=1e-3 )
else:
__a = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_, outputs.logits, atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
__UpperCamelCase : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub) | 448 | from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 0 |
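Two quick sanity checks for the combinations function above: it should match the standard library and satisfy Pascal's rule C(n, k) = C(n-1, k-1) + C(n-1, k). Meant to be run alongside the function defined above:

from math import comb  # standard-library reference implementation

assert combinations(52, 5) == 2_598_960 == comb(52, 5)
for n in range(2, 10):
    for k in range(1, n):
        assert combinations(n, k) == combinations(n - 1, k - 1) + combinations(n - 1, k)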
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("Bangalore"), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
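To see what the find_all/find calls above match, here is the same pattern run against a small inline HTML snippet (the markup is a made-up stand-in for the real Indeed page structure):

from bs4 import BeautifulSoup

html = '''
<div data-tn-component="organicJob">
  <a data-tn-element="jobTitle"> Android Developer </a>
  <span class="company"> Acme Apps </span>
</div>
'''
soup = BeautifulSoup(html, "html.parser")
for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
    title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
    company = job.find("span", {"class": "company"}).text.strip()
    print(title, "-", company)  # Android Developer - Acme Apps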
| 390 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ):
if rng is None:
SCREAMING_SNAKE_CASE__: Tuple= global_rng
SCREAMING_SNAKE_CASE__: List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= parent
SCREAMING_SNAKE_CASE__: Dict= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
SCREAMING_SNAKE_CASE__: Dict= max_seq_length
SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__: Dict= feature_size
SCREAMING_SNAKE_CASE__: str= padding_value
SCREAMING_SNAKE_CASE__: Dict= sampling_rate
SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
SCREAMING_SNAKE_CASE__: str= do_normalize
def UpperCamelCase_ ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__: int= [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = ASTFeatureExtractor
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: str= torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 0 |
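The tester class above builds batches whose sequences grow in length so that the padding code paths are exercised. A condensed standalone version of that helper, with a fixed seed for reproducibility:

import random

rng = random.Random(0)

def floats_list(shape, scale=1.0):
    # nested lists of random floats in [0, scale)
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]

# lengths 800, 1000, 1200, as in the batched tests above
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
print([len(s) for s in speech_inputs])  # [800, 1000, 1200]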
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path: str = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCamelCase ( _A ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def UpperCamelCase ( _A ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(snake_case_, id=snake_case_ )
| 324 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
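The _LazyModule machinery above defers the heavy framework imports until a symbol is first accessed. The same effect can be sketched with a plain PEP 562 module-level __getattr__ (a simplified stand-in, not the actual _LazyModule implementation):

# mypackage/__init__.py
import importlib

_import_structure = {"tokenization": ["MyTokenizer"], "modeling": ["MyModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # import the submodule only when one of its symbols is first requested
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")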
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ ( UpperCamelCase_ ):
"""simple docstring"""
UpperCamelCase_ = '''naver-clova-ix/donut-base-finetuned-docvqa'''
UpperCamelCase_ = (
'''This is a tool that answers a question about an document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
UpperCamelCase_ = '''document_qa'''
UpperCamelCase_ = AutoProcessor
UpperCamelCase_ = VisionEncoderDecoderModel
UpperCamelCase_ = ['''image''', '''text''']
UpperCamelCase_ = ['''text''']
def __init__( self : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
    def encode(self, document, question):
        '''simple docstring'''
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document, return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        '''simple docstring'''
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode(self, outputs):
        '''simple docstring'''
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '''''')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '''''')
        sequence = re.sub(R'''<.*?>''', '''''', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
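The decode step above strips the special tokens and then removes only the first task-start tag (count=1) before parsing. On a typical Donut-style output string (made up here) the cleanup behaves like this:

import re

sequence = "<s_docvqa><s_question> total? </s_question><s_answer> 42 </s_answer></s>"
sequence = sequence.replace("</s>", "")             # eos token
sequence = re.sub(r"<.*?>", "", sequence, count=1)  # drop only the leading <s_docvqa>
print(sequence.strip())  # <s_question> total? </s_question><s_answer> 42 </s_answer>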
| 94 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int | float | str )-> Dict:
try:
_lowerCamelCase = float(snake_case_ )
except ValueError:
raise ValueError('Please enter a valid number' )
_lowerCamelCase = decimal - int(snake_case_ )
if fractional_part == 0:
return int(snake_case_ ), 1
else:
_lowerCamelCase = len(str(snake_case_ ).split('.' )[1] )
_lowerCamelCase = int(decimal * (10**number_of_frac_digits) )
_lowerCamelCase = 10**number_of_frac_digits
_lowerCamelCase = denominator, numerator
while True:
_lowerCamelCase = dividend % divisor
if remainder == 0:
break
_lowerCamelCase = divisor, remainder
_lowerCamelCase = numerator / divisor, denominator / divisor
return int(snake_case_ ), int(snake_case_ )
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 650 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(current_state , character )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
            SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(current_state , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
                result[key].append(i - len(key ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
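Because the class and method names above are mangled, here is a self-contained brute-force reference (deliberately not the automaton) that yields the same {keyword: [start indices]} mapping the final search method is meant to return:
def brute_force_search(text: str, keywords: list[str]) -> dict[str, list[int]]:
    # O(len(text) * len(keywords) * max keyword length); the automaton needs one pass.
    result: dict[str, list[int]] = {}
    for kw in keywords:
        starts = [i for i in range(len(text) - len(kw) + 1) if text[i : i + len(kw)] == kw]
        if starts:
            result[kw] = starts
    return result

print(brute_force_search("whatever, err ... wherever", ["what", "hat", "ver", "er"]))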
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase__ : Tuple = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase__ : int = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _lowerCAmelCase ( __snake_case : list[list[int]] ) -> str:
__A : Optional[Any] = []
for i in range(len(snake_case_ ) ):
__A : Optional[int] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__A : List[str] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(snake_case_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(snake_case_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(snake_case_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__A : Dict = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(snake_case_ )
return next_generation
def _lowerCAmelCase ( __snake_case : list[list[int]] , __snake_case : int ) -> Union[str, Any]:
__A : str = []
for _ in range(snake_case_ ):
# Create output image
__A : Any = Image.new('RGB' , (len(cells[0] ), len(snake_case_ )) )
__A : List[str] = img.load()
# Save cells to image
for x in range(len(snake_case_ ) ):
for y in range(len(cells[0] ) ):
__A : Optional[Any] = 2_55 - cells[y][x] * 2_55
__A : Union[str, Any] = (colour, colour, colour)
# Save image
images.append(snake_case_ )
__A : int = new_generation(snake_case_ )
return images
if __name__ == "__main__":
lowercase__ : str = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:]) | 8 | import numpy as np
def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) )
SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__: int= ya
SCREAMING_SNAKE_CASE__: Tuple= xa
for k in range(snake_case_ ):
SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] )
SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka )
SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
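The parameter order of the obfuscated function above is not recoverable, so here is a self-contained sanity-check sketch of the same classic fourth-order update, integrating dy/dx = y from x = 0 to x = 1; the result should be very close to e:
import math

def rk4_step(f, x, y, h):
    # One classic Runge-Kutta step of size h.
    k1 = f(x, y)
    k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(x + h, y + h * k3)
    return y + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

x, y, h = 0.0, 1.0, 0.1
for _ in range(10):
    y = rk4_step(lambda x_, y_: y_, x, y, h)
    x += h
print(y, math.e)  # ~2.718279 vs 2.718282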
| 64 | 0 |
import numpy as np
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCAmelCase__ = int(np.ceil((x_end - xa) / h ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(snake_case_ ):
lowerCAmelCase__ = f(snake_case_ , y[k] )
lowerCAmelCase__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowerCAmelCase__ = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
lowerCAmelCase__ = f(x + h , y[k] + h * ka )
lowerCAmelCase__ = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 615 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
self.assertIsInstance(lowerCAmelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
self.assertIsInstance(lowerCAmelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
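Outside the test harness, the helper these cases exercise can be called directly; get_activation is the diffusers utility imported at the top of this file and maps a string key to an nn.Module instance:
import torch
from diffusers.models.activations import get_activation

act = get_activation("silu")        # an nn.SiLU() instance, per the tests above
x = torch.tensor([-1.0, 0.0, 2.0])
print(act(x))                       # SiLU applied elementwise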
| 64 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__a :Union[str, Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__a :Union[str, Any] = logging.getLogger()
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser()
parser.add_argument("-f" )
A_ = parser.parse_args()
return args.f
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any]="eval" ):
"""simple docstring"""
A_ = os.path.join(snake_case_ ,f'''{split}_results.json''' )
if os.path.exists(snake_case_ ):
with open(snake_case_ ,"r" ) as f:
return json.load(snake_case_ )
raise ValueError(f'''can\'t find {path}''' )
__a :Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _a ( UpperCamelCase_ ):
"""simple docstring"""
def __A ( self : int ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_flax_glue.main()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def __A ( self : Optional[int] ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_clm_flax.main()
A_ = get_results(UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def __A ( self : List[Any] ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_summarization_flax.main()
A_ = get_results(UpperCAmelCase , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def __A ( self : str ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_mlm_flax.main()
A_ = get_results(UpperCAmelCase )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def __A ( self : Tuple ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_ta_mlm_flax.main()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def __A ( self : List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
A_ = 7 if get_gpu_count() > 1 else 2
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_flax_ner.main()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def __A ( self : int ):
A_ = self.get_auto_remove_tmp_dir()
A_ = f'''\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '''.split()
with patch.object(UpperCAmelCase , "argv" , UpperCAmelCase ):
run_qa.main()
A_ = get_results(UpperCAmelCase )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 ) | 86 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
                SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(res , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ : int = SegmentTree(test_array, min)
lowercase_ : Optional[int] = SegmentTree(test_array, max)
lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ):
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            SCREAMING_SNAKE_CASE__: Any= reduce(min , test_array[i : j + 1] )
            SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(max , test_array[i : j + 1] )
            SCREAMING_SNAKE_CASE__: int= reduce(lambda a , b : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )
test_all_segments()
for index, value in test_updates.items():
lowercase_ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
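Following the call convention of the __main__ block above (inclusive [l, r] ranges, which test_all_segments checks against functools.reduce), a few direct queries on the updated trees:
print(min_segment_tree.query(0, 5))   # minimum of the updated test_array[0:6]
print(max_segment_tree.query(3, 11))  # maximum of the updated test_array[3:12]
print(sum_segment_tree.query(0, 11))  # sum of the whole updated array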
| 64 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase__ : Union[str, Any] ):
a__ : Any = data
a__ : Node | None = None
class A__ :
"""simple docstring"""
def __init__( self : List[Any] ):
a__ : Any = None
a__ : Any = None
def __iter__( self : str ):
a__ : List[Any] = self.head
while self.head:
yield node.data
a__ : Optional[int] = node.next
if node == self.head:
break
def __len__( self : Union[str, Any] ):
return sum(1 for _ in self )
def __repr__( self : Optional[int] ):
return "->".join(str(lowerCamelCase__ ) for item in iter(self ) )
def _UpperCamelCase( self : str , lowerCamelCase__ : Any ):
self.insert_nth(len(self ) , lowerCamelCase__ )
def _UpperCamelCase( self : Any , lowerCamelCase__ : str ):
self.insert_nth(0 , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ):
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
a__ : Optional[int] = Node(lowerCamelCase__ )
if self.head is None:
a__ : Optional[Any] = new_node # first node points itself
a__ : Optional[int] = new_node
elif index == 0: # insert at head
a__ : Union[str, Any] = self.head
a__ : Any = new_node
else:
a__ : Dict = self.head
for _ in range(index - 1 ):
a__ : Union[str, Any] = temp.next
a__ : Optional[Any] = temp.next
a__ : List[Any] = new_node
if index == len(self ) - 1: # insert at tail
a__ : Tuple = new_node
def _UpperCamelCase( self : Optional[int] ):
return self.delete_nth(0 )
def _UpperCamelCase( self : List[str] ):
return self.delete_nth(len(self ) - 1 )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[str, Any] = 0 ):
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
a__ : Any = self.head
if self.head == self.tail: # just one node
a__ : str = None
elif index == 0: # delete head node
a__ : int = self.tail.next.next
a__ : Dict = self.head.next
else:
a__ : Dict = self.head
for _ in range(index - 1 ):
a__ : Any = temp.next
a__ : List[str] = temp.next
a__ : int = temp.next.next
if index == len(self ) - 1: # delete at tail
a__ : List[str] = temp
return delete_node.data
def _UpperCamelCase( self : Any ):
return len(self ) == 0
def UpperCamelCase_ ( ) -> str:
a__ : Union[str, Any] = CircularLinkedList()
assert len(snake_case_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(snake_case_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(snake_case_ ) == i
circular_linked_list.insert_nth(snake_case_ , i + 1 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
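A short illustrative round-trip using the names referenced in the test function above (CircularLinkedList, insert_tail, delete_front); the expected values in comments follow the assertions exercised there:
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
print(cll)                 # 1->2->3
print(cll.delete_front())  # 1
print(len(cll))            # 2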
| 37 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = StableDiffusionControlNetImgaImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= 2
SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE__: Tuple= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase_ ( self ) -> Tuple:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> str:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = StableDiffusionControlNetImgaImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowerCAmelCase ):
if isinstance(lowerCAmelCase , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE__: int= {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= 2
SCREAMING_SNAKE_CASE__: Tuple= [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
]
SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE__: int= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= 10.0
SCREAMING_SNAKE_CASE__: Any= 4
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= steps
SCREAMING_SNAKE_CASE__: int= scale
SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= steps
SCREAMING_SNAKE_CASE__: List[Any]= scale
SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= steps
SCREAMING_SNAKE_CASE__: List[Any]= scale
SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= steps
SCREAMING_SNAKE_CASE__: int= scale
SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def UpperCamelCase_ ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
SCREAMING_SNAKE_CASE__: List[str]= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: List[Any]= load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE__: str= load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
class A :
'''simple docstring'''
__lowerCamelCase : Optional[int] = 42
__lowerCamelCase : Any = None
@staticmethod
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError
def a_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError
def a_ ( self : Dict , __lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
raise NotImplementedError
def a_ ( self : Tuple ) -> Any:
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )
@classmethod
def a_ ( cls : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return f'`pip install {cls.pip_package or cls.name}`'
class A (UpperCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : Any = '''optuna'''
@staticmethod
def a_ ( ) -> Tuple:
"""simple docstring"""
return is_optuna_available()
def a_ ( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return run_hp_search_optuna(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : Dict , __lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
return default_hp_space_optuna(__lowerCAmelCase )
class A (UpperCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : List[Any] = '''ray'''
__lowerCamelCase : Optional[int] = '''\'ray[tune]\''''
@staticmethod
def a_ ( ) -> int:
"""simple docstring"""
return is_ray_available()
def a_ ( self : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : str ) -> int:
"""simple docstring"""
return run_hp_search_ray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : Dict , __lowerCAmelCase : str ) -> int:
"""simple docstring"""
return default_hp_space_ray(__lowerCAmelCase )
class A (UpperCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : Any = '''sigopt'''
@staticmethod
def a_ ( ) -> Optional[int]:
"""simple docstring"""
return is_sigopt_available()
def a_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
return run_hp_search_sigopt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return default_hp_space_sigopt(__lowerCAmelCase )
class A (UpperCamelCase_ ):
'''simple docstring'''
__lowerCamelCase : str = '''wandb'''
@staticmethod
def a_ ( ) -> Dict:
"""simple docstring"""
return is_wandb_available()
def a_ ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str , **__lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
return run_hp_search_wandb(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def a_ ( self : List[str] , __lowerCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
return default_hp_space_wandb(__lowerCAmelCase )
A : List[Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
A__ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        A__ = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                F'{len(available_backends )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
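For context, these backends are normally reached through Trainer.hyperparameter_search; the sketch below is hypothetical (the trainer, model_init and datasets are assumed, not shown), with the search space written the way an optuna trial would sample it:
def optuna_hp_space(trial):
    # optuna trials expose suggest_* methods for sampling each dimension.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [8, 16, 32]
        ),
    }

# trainer = Trainer(model_init=model_init, args=training_args, ...)  # assumed setup
# best_run = trainer.hyperparameter_search(
#     hp_space=optuna_hp_space, backend="optuna", n_trials=10, direction="minimize"
# )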
| 176 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
__a = 42
# setable values
__a = 42
__a = 42
__a = None
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__a = [e.name for e in FlaxKarrasDiffusionSchedulers]
__a = 42
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return True
@register_to_config
def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= dtype
def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
if common is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
return sample
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE__: List[Any]= variance
SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
if key is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(model_output , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE__: Any= None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__: str= model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
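A hypothetical denoising loop against the public FlaxDDPMScheduler API (create_state / set_timesteps / step, which the collapsed method names above correspond to); the UNet forward pass is stubbed with zeros:
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

sample = jax.random.normal(jax.random.PRNGKey(0), (1, 32, 32, 3)) * state.init_noise_sigma
for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # stand-in for a UNet prediction
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)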
| 64 | 0 |
"""simple docstring"""
from itertools import permutations
def __snake_case ( __A ) -> Optional[Any]:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests : Dict = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def __snake_case ( __A = 10 ) -> List[Any]:
return sum(
int("""""".join(map(snake_case_ ,snake_case_ ) ) )
for num in permutations(range(snake_case_ ) )
if is_substring_divisible(snake_case_ ) )
if __name__ == "__main__":
print(F'{solution() = }')
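A quick spot check with the predicate name referenced inside solution() above: 1406357289, the canonical Project Euler 43 example, passes every substring-divisibility test:
digits = tuple(int(d) for d in "1406357289")
print(is_substring_divisible(digits))  # True: 357 % 7 == 0, 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0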
| 607 | def A__ ( snake_case_ : int ):
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
SCREAMING_SNAKE_CASE__: List[Any]= [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE__: List[str]= 1
if upper_limit > 0:
SCREAMING_SNAKE_CASE__: List[str]= 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(snake_case_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
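An illustrative check using the name called in the interactive loop above; the Catalan sequence starts 1, 1, 2, 5, 14, 42:
print(catalan_numbers(5))  # [1, 1, 2, 5, 14, 42]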
| 64 | 0 |
'''simple docstring'''
class __SCREAMING_SNAKE_CASE :
def __init__( self ) ->List[str]:
'''simple docstring'''
__a = ''''''
__a = ''''''
__a = []
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->int:
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
__a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
__a = self.__min_dist_top_down_dp(lowerCamelCase , n - 1 )
__a = self.__min_dist_top_down_dp(m - 1 , lowerCamelCase )
__a = self.__min_dist_top_down_dp(m - 1 , n - 1 )
__a = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self.dp[m][n]
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->int:
'''simple docstring'''
__a = worda
__a = worda
__a = [[-1 for _ in range(len(lowerCamelCase ) )] for _ in range(len(lowerCamelCase ) )]
return self.__min_dist_top_down_dp(len(lowerCamelCase ) - 1 , len(lowerCamelCase ) - 1 )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->int:
'''simple docstring'''
__a = worda
__a = worda
__a = len(lowerCamelCase )
__a = len(lowerCamelCase )
__a = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
__a = j
elif j == 0: # second string is empty
__a = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
__a = self.dp[i - 1][j - 1]
else:
__a = self.dp[i][j - 1]
__a = self.dp[i - 1][j]
__a = self.dp[i - 1][j - 1]
__a = 1 + min(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
__UpperCamelCase : Any = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
__UpperCamelCase : Optional[Any] = input("""Enter the first string: """).strip()
__UpperCamelCase : List[Any] = input("""Enter the second string: """).strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""") | 448 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# The original class names were lost in this dump. Given the backend list,
# these are almost certainly the six ONNX Stable Diffusion pipeline dummies
# from diffusers' dummy_torch_and_transformers_and_onnx_objects module, so
# those names are used below; treat them as a reconstruction.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
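# Added usage sketch: touching a dummy object is what surfaces the helpful
# error. If any of the three backends is missing, instantiation raises an
# ImportError whose message lists the required pip installs.
def _demo_dummy_object() -> None:
    try:
        OnnxStableDiffusionPipeline()  # one of the dummies defined above
    except ImportError as err:
        print(err)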
| 390 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict for OPTModel."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # save the result
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
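# Added sanity-check example for the QKV handling above: the fused metaseq
# projection stacks K, V and Q along dim 0, so its first dimension must be
# divisible by 3 and `torch.split` recovers three equally sized blocks.
def _demo_qkv_split(hidden_size: int = 12) -> None:
    fused = torch.randn(3 * hidden_size, hidden_size)  # stand-in for a .qkv_proj. weight
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)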
| 64 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
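# Hedged usage sketch (the exact tool-invocation API varies across
# transformers versions): the tool turns each candidate label into a
# "This example is {label}" hypothesis and returns the best-scoring one.
#
#   classifier = TextClassificationTool()
#   classifier("This new feature is fantastic!", labels=["positive", "negative"])
#   # -> "positive"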
| 324 | def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho), K in Pa, rho in kg/m^3.

    >>> round(speed_of_sound_in_a_fluid(density=1000.0, bulk_modulus=2.2e9), 1)
    1483.2
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
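# Worked example (added): water at roughly 20 °C has a bulk modulus of about
# 2.2e9 Pa and a density of about 1000 kg/m^3, so the formula gives
# sqrt(2.2e9 / 1000) ≈ 1483 m/s, close to the measured ~1480 m/s.
WATER_SPEED_OF_SOUND = speed_of_sound_in_a_fluid(density=1000.0, bulk_modulus=2.2e9)  # ≈ 1483.2 m/s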
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax key/tensor pair to the PyTorch naming and layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
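# Added back-of-the-envelope helper for the threshold logic above:
# convert_file_size_to_int("10GB") is 10**10 bytes and a bfloat16 element
# costs 2 bytes, so the default settings fit about 5e9 parameters per shard.
def _max_params_per_shard(max_shard_size: str = "10GB", torch_dtype=torch.bfloat16) -> int:
    shard_bytes = convert_file_size_to_int(max_shard_size)
    return int(shard_bytes // dtype_byte_size(torch_dtype))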
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 94 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
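# Context sketch (an addition; `_LazyModule`'s real internals differ): the
# same deferral can be emulated with PEP 562's module-level `__getattr__`,
# importing a submodule only when one of its names is first requested.
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")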
| 64 | 0 |
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
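# Added usage demo for the helpers above: print a status line in ANSI green,
# then draw a horizontal rule and hop the cursor one line down.
def _demo_cursor_helpers() -> None:
    writeColor("All good!", 32, end="\n")  # 32 is the ANSI code for green
    linebreak()
    move_cursor(num_lines=1, direction=Direction.DOWN.name)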
| 650 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
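# Added usage sketch: aligning the template with a dataset's features swaps
# the generic ClassLabel placeholder in `label_schema` for the concrete one.
def _demo_align_with_features() -> None:
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    task = TextClassification(text_column="text", label_column="labels")
    task = task.align_with_features(features)
    assert task.label_schema["labels"].names == ["neg", "pos"]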
| 64 | 0 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass of adjacent swaps, then recurse on the
    unsorted prefix.

    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
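# Added sketch of the same algorithm without recursion; unlike the recursive
# form above (one frame per pass), this cannot hit Python's recursion limit
# on long inputs.
def bubble_sort_iterative(collection: list) -> list:
    for end in range(len(collection), 0, -1):
        swapped = False
        for i in range(end - 1):
            if collection[i] > collection[i + 1]:
                collection[i], collection[i + 1] = collection[i + 1], collection[i]
                swapped = True
        if not swapped:  # already sorted, stop early
            break
    return collection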
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"

                    assert backend in deps, f"{backend} is not in the deps table!"
| 64 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 615 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 64 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")
class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set DataStructure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))
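# Added usage demo: after two unions, "a", "b" and "c" share one
# representative while "d" remains alone in its own set.
def _demo_disjoint_set() -> None:
    dst = DisjointSetTree[str]()
    for item in ("a", "b", "c", "d"):
        dst.make_set(item)
    dst.union("a", "b")
    dst.union("b", "c")
    assert dst.find_set("a") == dst.find_set("c")
    assert dst.find_set("a") != dst.find_set("d")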
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # Kruskal's algorithm: sort the edges, then greedily take any edge
        # that connects two different components.
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 86 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting_legacy_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 37 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 64 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
A__ = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(__lowerCAmelCase )
A__ = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@require_torch
def a_ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
if not self.test_seqaseq:
return
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
A__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'''
''' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'''
''' will only worsen the violence and misery for millions of people.''',
]
A__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'''
''' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'''
''' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
try:
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
A__ = tokenizer.prepare_seqaseq_batch(
__lowerCAmelCase , tgt_texts=__lowerCAmelCase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=__lowerCAmelCase , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , __lowerCAmelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def a_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = [AddedToken("""<special>""" , lstrip=__lowerCAmelCase )]
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
A__ = tokenizer_r.encode("""Hey this is a <special> token""" )
A__ = tokenizer_r.encode("""<special>""" , add_special_tokens=__lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
A__ = self.tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , **__lowerCAmelCase )
A__ = tokenizer_p.encode("""Hey this is a <special> token""" )
A__ = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that"
        ' "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the'
        " violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def a_ ( self : Tuple ) -> int:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 )
def a_ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
# fmt: off
A__ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
A__ = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def a_ ( self : Any ) -> Dict:
"""simple docstring"""
A__ = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , __lowerCAmelCase )
A__ = 10
A__ = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def a_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] )
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
A__ = NllbTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def test_enro_tokenizer_prepare_batch( self ):
    """simple docstring"""
    batch = self.tokenizer(
        self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
    batch["""decoder_input_ids"""] = shift_tokens_right(
        batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
    self.assertIsInstance(batch , BatchEncoding )
    self.assertEqual((2, 15) , batch.input_ids.shape )
    self.assertEqual((2, 15) , batch.attention_mask.shape )
    result = batch.input_ids.tolist()[0]
    self.assertListEqual(self.expected_src_tokens , result )
    self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] )  # decoder input starts with the target language code
    # Test that special tokens are reset
    self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
    self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def test_seq2seq_max_length( self ):
    """simple docstring"""
    batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="""pt""" )
    targets = self.tokenizer(
        text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="""pt""" )
    labels = targets['''input_ids''']
    batch["""decoder_input_ids"""] = shift_tokens_right(
        labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
    self.assertEqual(batch.input_ids.shape[1] , 3 )
    self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def test_tokenizer_translation( self ):
    """simple docstring"""
    inputs = self.tokenizer._build_translation_inputs(
        """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
    self.assertEqual(
        nested_simplify(inputs ) , {
            # eng_Latn, A, test, EOS
            """input_ids""": [[25_60_47, 70, 73_56, 2]],
            """attention_mask""": [[1, 1, 1, 1]],
            # fra_Latn
            """forced_bos_token_id""": 25_60_57,
        } , )
@require_torch
def test_legacy_behaviour( self ):
    """simple docstring"""
    self.tokenizer.legacy_behaviour = True
    inputs = self.tokenizer(
        """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
    self.assertEqual(
        inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
    self.tokenizer.legacy_behaviour = False
    inputs = self.tokenizer(
        """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
    self.assertEqual(
        inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 176 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
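# PoolFormerImageProcessingTester holds the size/crop/normalization settings shared by the tests below.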
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ):
    image_processor = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processor , '''do_resize_and_center_crop''' ) )
    self.assertTrue(hasattr(image_processor , '''size''' ) )
    self.assertTrue(hasattr(image_processor , '''crop_pct''' ) )
    self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
    self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
    self.assertTrue(hasattr(image_processor , '''image_std''' ) )
def test_image_processor_from_dict_with_kwargs( self ):
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
    self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def test_batch_feature( self ):
pass
def test_call_pil( self ):
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
    for image in image_inputs:
        self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_numpy( self ):
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def test_call_pytorch( self ):
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 64 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase: Any =logging.get_logger(__name__)
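# The helpers below build a DPTConfig for the checkpoint and rename original timm/DPT state-dict keys to the Hugging Face layout.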
def get_dpt_config( checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id ,filename ,repo_type="""dataset""" ) ) ,"""r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" ,"""dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" ,"""dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" ,"""patch_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" ,"""position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" ,"""attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" ,"""projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" ,"""layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" ,"""output.dense""" )
    if "norm1" in name:
        name = name.replace("""norm1""" ,"""layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" ,"""layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" ,"""head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" ,"""neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" ,"""convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" ,"""convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" ,"""convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" ,"""convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F'''refinenet{layer_idx}''' ,F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace("""out_conv""" ,"""projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" ,"""residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" ,"""residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" ,"""convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" ,"""convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" ,"""dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" ,"""batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" ,"""head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" ,"""layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" ,"""auxiliary_head.head""" )
    return name
def read_in_q_k_v( state_dict ,config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url ,pytorch_dump_folder_path ,push_to_hub ,model_name ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict ,config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image ,return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] ,expected_slice ,atol=1E-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] ,expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model to hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="""nielsr""" ,commit_message="""Add model""" ,use_temp_dir=True ,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path ,model_name ) ,organization="""nielsr""" ,commit_message="""Add image processor""" ,use_temp_dir=True ,)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you\'re pushing to the hub.",
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 607 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
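# 3 is the smallest candidate considered when sampling primitive roots below.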
def primitive_root( p_val : int ) -> int:
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        # reject candidates with a small multiplicative order modulo p_val
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size : int ):
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name : str , key_size : int ):
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def main():
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_048 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 64 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_speech_to_text'] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_to_text'] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def combinations( n : int , k : int ) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
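# T is the element type stored in the tree; the combining function fn (e.g. min, max, sum) must be associative.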
class SegmentTree( Generic[T] ):
'''simple docstring'''
def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ) -> None:
    any_type: Any | None = None
    self.N = len(arr )
    self.st = [any_type for _ in range(self.N )] + arr
    self.fn = fnc
    self.build()
def build( self ) -> None:
    for p in range(self.N - 1 , 0 , -1 ):
        self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def update( self , p : int , v : T ) -> None:
    p += self.N
    self.st[p] = v
    while p > 1:
        p = p // 2
        self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def query( self , l : int , r : int ) -> T | None: # noqa: E741
    l, r = l + self.N, r + self.N
    res = None
    while l <= r:
        if l % 2 == 1:
            res = self.st[l] if res is None else self.fn(res , self.st[l] )
        if r % 2 == 0:
            res = self.st[r] if res is None else self.fn(res , self.st[r] )
        l, r = (l + 1) // 2, (r - 1) // 2
    return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments():
    '''Check every possible segment against a reduce over the raw array.'''
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 390 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
    def _flatten(list_of_lists ):
        return list(itertools.chain(*list_of_lists ) )
    if equal_length:
        speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
    else:
        # make sure that inputs increase in size
        speech_inputs = [
            _flatten(floats_list((x, self.feature_size) ) )
            for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
        ]
    if numpify:
        speech_inputs = [np.asarray(x ) for x in speech_inputs]
    return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def test_call( self ):
    # Tests that all call wrap to encode_plus and batch_encode_plus
    feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    # create three inputs of length 800, 1000, and 1200
    speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
    np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
    # Test not batched input
    encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
    encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
    self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
    # Test batched
    encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors='''np''' ).input_values
    encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors='''np''' ).input_values
    for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
        self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    # Test 2-D numpy arrays are batched.
    speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
    np_speech_inputs = np.asarray(speech_inputs )
    encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='''np''' ).input_values
    encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='''np''' ).input_values
    for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
        self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
@require_torch
def test_double_precision_pad( self ):
    import torch
    feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
    np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
    py_speech_inputs = np_speech_inputs.tolist()
    for inputs in [py_speech_inputs, np_speech_inputs]:
        np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
        self.assertTrue(np_processed.input_values.dtype == np.float32 )
        pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
        self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
    from datasets import load_dataset
    ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
    # automatic decoding with librispeech
    speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
    return [x["array"] for x in speech_samples]
@require_torch
def test_integration( self ):
    # fmt: off
    EXPECTED_INPUT_VALUES = torch.tensor(
        [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
        -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
        -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
        -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
    # fmt: on
    input_speech = self._load_datasamples(1 )
    feature_extractor = ASTFeatureExtractor()
    input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
    self.assertEqual(input_values.shape , (1, 1024, 128) )
    self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1e-4 ) )
| 64 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
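# jsonl_path, jsonl_312_path and dataset used below are pytest fixtures provided by the surrounding test suite, not defined in this file.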
def _check_json_dataset( dataset, expected_features ):
    """simple docstring"""
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
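# The parametrized tests below exercise JsonDatasetReader across keep_in_memory, features, split and path-type options.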
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def test_dataset_from_json_keep_in_memory( keep_in_memory, jsonl_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset, expected_features )
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
], )
def test_dataset_from_json_features( features, jsonl_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir ).read()
    _check_json_dataset(dataset, expected_features )
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
], )
def test_dataset_from_json_with_unsorted_column_names( features, jsonl_312_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir ).read()
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features( jsonl_312_path, tmp_path ):
    """simple docstring"""
    features = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / '''cache'''
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir ).read()
    assert isinstance(dataset, Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_dataset_from_json_split( split, jsonl_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split ).read()
    _check_json_dataset(dataset, expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""", [str, list] )
def test_dataset_from_json_path_type( path_type, jsonl_path, tmp_path ):
    """simple docstring"""
    if issubclass(path_type, str ):
        path = jsonl_path
    elif issubclass(path_type, list ):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir ).read()
    _check_json_dataset(dataset, expected_features )
def _check_json_datasetdict( dataset_dict, expected_features, splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict, DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def test_datasetdict_from_json_keep_in_memory( keep_in_memory, jsonl_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"""train""": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset, expected_features )
@pytest.mark.parametrize(
"""features""", [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
], )
def test_datasetdict_from_json_features( features, jsonl_path, tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({"""train""": jsonl_path}, features=features, cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset, expected_features )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def test_datasetdict_from_json_splits( split, jsonl_path, tmp_path ):
    """simple docstring"""
    if split:
        path = {split: jsonl_path}
    else:
        split = '''train'''
        path = {'''train''': jsonl_path, '''test''': jsonl_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    """simple docstring"""
    return json.load(buffer )
def load_json_lines( buffer ):
    """simple docstring"""
    return [json.loads(line ) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def test_dataset_to_json_lines( self , lines , load_json_function , dataset ):
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset , buffer , lines=lines ).write()
        buffer.seek(0 )
        exported_content = load_json_function(buffer )
    assert isinstance(exported_content , list )
    assert isinstance(exported_content[0] , dict )
    assert len(exported_content ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def test_dataset_to_json_orient( self , orient , container , keys , len_at , dataset ):
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
        buffer.seek(0 )
        exported_content = load_json(buffer )
    assert isinstance(exported_content , container )
    if keys:
        if container is dict:
            assert exported_content.keys() == keys
        else:
            assert exported_content[0].keys() == keys
    else:
        assert not hasattr(exported_content , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
    if len_at:
        assert len(exported_content[len_at] ) == 10
    else:
        assert len(exported_content ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def test_dataset_to_json_lines_multiproc( self , lines , load_json_function , dataset ):
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
        buffer.seek(0 )
        exported_content = load_json_function(buffer )
    assert isinstance(exported_content , list )
    assert isinstance(exported_content[0] , dict )
    assert len(exported_content ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def test_dataset_to_json_orient_multiproc( self , orient , container , keys , len_at , dataset ):
    with io.BytesIO() as buffer:
        JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
        buffer.seek(0 )
        exported_content = load_json(buffer )
    assert isinstance(exported_content , container )
    if keys:
        if container is dict:
            assert exported_content.keys() == keys
        else:
            assert exported_content[0].keys() == keys
    else:
        assert not hasattr(exported_content , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
    if len_at:
        assert len(exported_content[len_at] ) == 10
    else:
        assert len(exported_content ) == 10
def test_dataset_to_json_invalid_num_proc( self , dataset ):
    with pytest.raises(ValueError ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def test_dataset_to_json_compression( self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
    path = tmp_path_factory.mktemp("""data""" ) / F'test.json.{extension}'
    original_path = str(shared_datadir / F'test_file.json.{extension}' )
    JsonDatasetWriter(dataset , path , compression=compression ).write()
    with fsspec.open(path , """rb""" , compression="""infer""" ) as f:
        exported_content = f.read()
    with fsspec.open(original_path , """rb""" , compression="""infer""" ) as f:
        original_content = f.read()
    assert exported_content == original_content
| 324 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_speech_to_text'] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_to_text'] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
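# Reference output slices (logits[0, 0, 0, :30]) for each checkpoint; the loop at the bottom looks them up under keys derived from the model id.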
SCREAMING_SNAKE_CASE = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
SCREAMING_SNAKE_CASE = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
SCREAMING_SNAKE_CASE = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
SCREAMING_SNAKE_CASE = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
SCREAMING_SNAKE_CASE = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
SCREAMING_SNAKE_CASE = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
SCREAMING_SNAKE_CASE = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
SCREAMING_SNAKE_CASE = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
SCREAMING_SNAKE_CASE = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
SCREAMING_SNAKE_CASE = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
SCREAMING_SNAKE_CASE = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
SCREAMING_SNAKE_CASE = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
SCREAMING_SNAKE_CASE = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
SCREAMING_SNAKE_CASE = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
SCREAMING_SNAKE_CASE = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
local_checkpoint = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
    model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
    model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
    logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 94 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
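# Helpers: parse_args() reads the CLI options, image_grid() tiles PIL images into one grid, generate_images() runs the pipeline with a fixed seed.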
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid( imgs , rows , cols ):
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w, h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    w, h = grid.size
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def generate_images( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# assignment target recovered by assumption: disables the safety checker while keeping the interface
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    # class name assumed from context: a torch Dataset over tokenized LM sequences
    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f'Splitting {sum(idx)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        """Remove sequences with too high a proportion of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Print some statistics on the corpus (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 650 | from __future__ import annotations
from collections import deque
class Automaton:
    # Aho-Corasick automaton; class name assumed for readability
    def __init__(self, keywords) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()
    def find_next_state(self, current_state, char) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword(self, keyword) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)
    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )
    def search_in(self, string) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
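# Example usage (added; names follow the class as reconstructed above):
auto = Automaton(["he", "she", "his", "hers"])
print(auto.search_in("ahishers"))  # {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}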
| 64 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de | 8 | import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    # classic fourth-order Runge-Kutta integrator for y' = f(x, y), y(xa) = ya
    # (function name chosen for readability; the original name was mangled)
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
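# Minimal smoke test (added; assumes the `runge_kutta` name chosen above):
# integrate y' = y from x=0 to x=1 with y(0)=1; the last value approximates e.
ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
print(ys[-1])  # ~2.71828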
| 64 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
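if __name__ == "__main__":
    # Example (added): build the default ViT-MSN configuration and inspect one field.
    config = ViTMSNConfig()
    print(config.hidden_size)  # 768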
| 615 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation('swish')
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation('silu')
        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation('mish')
        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation('gelu')
        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def list_str(values):
    return values.split(",")

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
args = parser.parse_args()
from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
selected_warnings = extract_warnings(args.output_dir, args.targets)
selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 86 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('T')
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
    0: 7,
    1: 2,
    2: 6,
    3: -14,
    4: 5,
    5: 4,
    6: 7,
    7: -10,
    8: 9,
    9: 10,
    10: 12,
    11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments():
    for i in range(len(test_array)):
        for j in range(i, len(test_array)):
            min_range = reduce(min, test_array[i : j + 1])
            max_range = reduce(max, test_array[i : j + 1])
            sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
            assert min_range == min_segment_tree.query(i, j)
            assert max_range == max_segment_tree.query(i, j)
            assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}
    start = job['started_at']
    end = job['completed_at']
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
args = parser.parse_args()
job_time = get_job_time(args.workflow_run_id)
job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 37 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    # class and attribute names below are recovered by assumption from diffusers test conventions
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    # class and attribute names below are recovered by assumption from diffusers test conventions
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'image': image,
            'control_image': control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        # the overridden input keys and compared output pairs below are recovered
        # by assumption from similar diffusers ControlNet tests
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = steps
        inputs['controlnet_conditioning_scale'] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', safety_checker=None, controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='cpu').manual_seed(0)
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png').resize((512, 512))
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png').resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type='np', num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy')
        assert np.abs(expected_image - image).max() < 9e-2
| 64 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
args = parser.parse_args()

base_model_path = args.base_model_path
checkpoint_path = args.checkpoint_path
dump_path = args.dump_path
lora_prefix_unet = args.lora_prefix_unet
lora_prefix_text_encoder = args.lora_prefix_text_encoder
alpha = args.alpha

pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 176 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ):
        self.dtype = dtype
SCREAMING_SNAKE_CASE__: Optional[int]= dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key=None, return_dict: bool = True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                'or `v_prediction` for the FlaxDDPMScheduler.')

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
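# For reference (added): `step` and `_get_variance` above implement Eq. (6)-(7) of Ho et al.,
# "Denoising Diffusion Probabilistic Models" (2020), coefficient by coefficient:
#
#   mu_tilde_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                          + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
#   beta_tilde_t         = ((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * beta_t
#
# where `pred_original_sample_coeff`, `current_sample_coeff`, and the "fixed_small"
# variance correspond to the three factors above.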
| 64 | 0 |
"""simple docstring"""
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
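# Quick self-check (added): the first five Hamming numbers.
assert hamming(5) == [1, 2, 3, 4, 5]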
if __name__ == "__main__":
lowerCAmelCase: List[str] =input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
lowerCAmelCase: List[str] =hamming(int(n))
print("-----------------------------------------------------")
print(F'The list with nth numbers is: {hamming_numbers}')
print("-----------------------------------------------------")
| 607 | def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
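# Example (added): the Catalan numbers C(0)..C(5).
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]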
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 64 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, using the learning rate set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a schedule with a constant learning rate preceded by a linear warmup period."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, scaled by the multiples encoded in `step_rules`."""

    # rule format assumed to be "<multiple>:<step>,...,<final multiple>"
    rules_dict = {}
    rule_list = step_rules.split(',')
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(':')
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
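# Example (added; assumes the "<multiple>:<step>" rule format above):
# step_rules="1:10,0.1:20,0.01" keeps the lr multiple at 1 before step 10,
# at 0.1 before step 20, and at 0.01 for all later steps.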
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with a learning rate that decreases linearly to 0 after a warmup period."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Create a schedule with a learning rate that follows a cosine curve after a warmup period."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Create a schedule with a cosine learning rate with several hard restarts, after a warmup period."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Create a schedule whose learning rate decays from the initial lr to `lr_end` as a polynomial of the given power, after a warmup period."""

    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch) | 448 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 0 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary representation, returned as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')

    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')

    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(('-' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 390 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq OPT checkpoint and remap its keys to the HF layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    sd = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(sd)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 0 |
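A few spot checks for the hexadecimal-to-binary converter above (using the `hex_to_bin` name from the cleaned-up version; each expected value can be cross-checked with Python's built-in `bin()`):

assert hex_to_bin("AC") == 10101100          # 0xAC == 172 == 0b10101100
assert hex_to_bin("9A4") == 100110100100     # 0x9A4 == 2468
assert hex_to_bin("   12f   ") == 100101111  # surrounding whitespace is stripped
assert hex_to_bin("-fFfF") == -1111111111111111  # sign and letter case are handled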
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 324 | def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace relation: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
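The fluid sound-speed function above implements the Newton-Laplace relation c = sqrt(K / rho); a quick numeric check with approximate textbook constants for water:

# bulk modulus K ≈ 2.15e9 Pa and density ρ ≈ 998 kg/m³ for water near 20 °C
c = speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)
print(f"{c:.0f} m/s")  # ≈ 1468 m/s, close to the commonly quoted ~1480 m/s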
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Log the WER/CER results and, optionally, every prediction/target pair."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case the text and strip the characters that were ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "  ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text


def main(args):
    """Run the ASR pipeline over the requested dataset split and log the metrics."""
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
args = parser.parse_args()
main(args)
| 94 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 0 |
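To make the evaluation script's text normalisation concrete, here is what `normalize_text` (the name used in the cleaned-up version above) does to a small sample:

sample = "Hello, World!\nSecond — line…"
print(normalize_text(sample))  # -> "hello world second line"
# punctuation matched by the ignore-regex is dropped, text is lower-cased,
# and newlines / double spaces collapse to single spaces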
"""simple docstring"""
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n  ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 650 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 64 | 0 |
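A short exercise of the matrix class above; the 2×2 values are chosen so the determinant is 1 and the inverse stays integral (scalar multiplication truncates with `int()`, so non-integral inverses would lose precision):

m = Matrix([[2, 1], [1, 1]])
print(m.determinant())                  # 2*1 - 1*1 = 1
print(m.inverse().rows)                 # [[1, -1], [-1, 2]]
print(m * m.inverse() == m.identity())  # True
print((m ** 2).rows)                    # [[5, 3], [3, 2]]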
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
__A : List[Any] = orig_state_dict.pop(snake_case_ )
if "attn.in_proj" in key:
__A : List[Any] = key.split('.' )
if key.startswith('visual' ):
__A : Dict = key_split[3]
__A : Optional[int] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__A : Optional[int] = val[
:dim, :
]
__A : List[Any] = val[
dim : dim * 2, :
]
__A : Dict = val[
-dim:, :
]
else:
__A : List[Any] = val[
:dim
]
__A : Dict = val[
dim : dim * 2
]
__A : List[str] = val[
-dim:
]
else:
if "weight" in key:
__A : Dict = val[
:dim, :
]
__A : Tuple = val[
dim : dim * 2, :
]
__A : Dict = val[
-dim:, :
]
else:
__A : Optional[Any] = val[:dim]
__A : int = val[
dim : dim * 2
]
__A : Tuple = val[-dim:]
elif key.startswith('mit' ):
__A : List[str] = key_split[2]
__A : Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
__A : Any = val[:dim, :]
__A : Optional[int] = val[dim : dim * 2, :]
__A : Optional[Any] = val[-dim:, :]
else:
__A : Tuple = val[:dim]
__A : int = val[dim : dim * 2]
__A : Any = val[-dim:]
else:
__A : int = key_split[2]
__A : Optional[Any] = config.text_config.hidden_size
if "weight" in key:
__A : Dict = val[:dim, :]
__A : Optional[int] = val[
dim : dim * 2, :
]
__A : Tuple = val[-dim:, :]
else:
__A : Tuple = val[:dim]
__A : Any = val[
dim : dim * 2
]
__A : Union[str, Any] = val[-dim:]
else:
__A : int = rename_key(snake_case_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__A : List[Any] = val.T
__A : List[str] = val
return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset")
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
else:
raise ValueError(f'Model name {model_name} not supported' )
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 8 | import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 64 | 0 |
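The X-CLIP conversion above is normally driven through argparse; calling it programmatically looks like the sketch below (the output directory is a placeholder, and the checkpoint download still happens inside the function):

convert_xclip_checkpoint(
    model_name="xclip-base-patch32",         # any key of model_to_url
    pytorch_dump_folder_path="./xclip-out",  # placeholder path
    push_to_hub=False,
)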
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 615 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 64 | 0 |
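The SageMaker test above relies on `metric_definitions` regexes (defined in the environment fixture that follows) to pull KPI numbers out of training logs; a small self-contained check of one such pattern:

import re

pattern = r"eval_accuracy.*=\D*(.*?)$"  # copied from the pytorch metric_definitions
line = "***** eval_accuracy = 0.8125"
match = re.search(pattern, line)
print(float(match.group(1)))  # 0.8125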
import os
import pytest
from attr import dataclass
AWS_DEFAULT_REGION = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework) | 86 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 0 |
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 37 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 64 | 0 |
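The pin table above maps bare package names to pip requirement strings; below is a sketch of how such a table can be audited at runtime using only the standard library. `installed_versions` is a hypothetical helper, not part of the original file:

from importlib.metadata import PackageNotFoundError, version

def installed_versions(pins: dict) -> dict:
    """Return the installed version (or None) for every pinned package."""
    report = {}
    for package in pins:
        try:
            report[package] = version(package)
        except PackageNotFoundError:
            report[package] = None
    return report

print(installed_versions(deps)["numpy"])  # e.g. "1.26.4" on a typical install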
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_2_8,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 5_0,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 1_0,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 1_0,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
def a_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
A__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
A__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , repo_id="""test-config""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
A__ = BertConfig.from_pretrained(f'{USER}/test-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def a_ ( self : Any ) -> List[str]:
"""simple docstring"""
A__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
A__ = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="""valid_org/test-config-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
A__ = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def a_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
CustomConfig.register_for_auto_class()
A__ = CustomConfig(attribute=42 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
A__ = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config' , trust_remote_code=__lowerCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 42 )
class A (unittest.TestCase ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
A__ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
A__ = c.n_embd + 1 # int
A__ = c.resid_pdrop + 1.0 # float
A__ = not c.scale_attn_weights # bool
A__ = c.summary_type + '''foo''' # str
c.update_from_string(
f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
self.assertEqual(__lowerCAmelCase , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__lowerCAmelCase , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__lowerCAmelCase , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__lowerCAmelCase , c.summary_type , """mismatch for key: summary_type""" )
def a_ ( self : Any ) -> List[Any]:
"""simple docstring"""
A__ = PretrainedConfig()
A__ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__lowerCAmelCase , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
A__ = [key for key, value in config_common_kwargs.items() if value == getattr(__lowerCAmelCase , __lowerCAmelCase )]
if len(__lowerCAmelCase ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f' {", ".join(__lowerCAmelCase )}.' )
def a_ ( self : str ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
A__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
A__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__lowerCAmelCase )
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
A__ = mock.Mock()
A__ = 5_00
A__ = {}
A__ = HTTPError
A__ = {}
# Download this model to make sure it's in the cache.
A__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__lowerCAmelCase ) as mock_head:
A__ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def a_ ( self : str ) -> str:
"""simple docstring"""
A__ = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def a_ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
A__ = AutoConfig.from_pretrained("""bert-base-cased""" )
A__ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__lowerCAmelCase )
A__ = 2
json.dump(configuration.to_dict() , open(os.path.join(__lowerCAmelCase , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
A__ = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
A__ = ['''config.42.0.0.json''']
A__ = 7_68
configuration.save_pretrained(__lowerCAmelCase )
shutil.move(os.path.join(__lowerCAmelCase , """config.4.0.0.json""" ) , os.path.join(__lowerCAmelCase , """config.42.0.0.json""" ) )
A__ = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def a_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
A__ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
A__ = '''v4.0.0'''
A__ = new_transformers.models.auto.AutoConfig.from_pretrained(
__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__lowerCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
A__ = '''v3.0.0'''
A__ = old_transformers.models.auto.AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_68 )
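# Hedged usage sketch of PretrainedConfig.update_from_string, the method
# exercised earlier in this test class: it parses a comma-separated
# "key=value" string and casts each value to the type of the existing
# attribute (assumes a standard transformers install):
from transformers import GPT2Config

_cfg = GPT2Config()
_cfg.update_from_string("n_embd=769,resid_pdrop=0.2,scale_attn_weights=false,summary_type=foo")
assert _cfg.n_embd == 769                # cast to int
assert _cfg.resid_pdrop == 0.2           # cast to float
assert _cfg.scale_attn_weights is False  # cast to bool
assert _cfg.summary_type == "foo"        # kept as str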
| 176 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase_ ( self ) -> Tuple:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
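# Hedged usage sketch (separate from the tests above, assuming Pillow, numpy
# and torch are installed): run the processor end to end on a dummy image.
import numpy as np
from PIL import Image
from transformers import PoolFormerImageProcessor

_proc = PoolFormerImageProcessor()  # library defaults: resize shortest edge, center crop
_img = Image.fromarray(np.random.randint(0, 255, (40, 50, 3), dtype=np.uint8))
_pixel_values = _proc(images=_img, return_tensors="pt").pixel_values
print(_pixel_values.shape)  # with defaults this should be torch.Size([1, 3, 224, 224])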
| 64 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCAmelCase: List[Any] =logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCamelCase_ ):
def __init__( self , *snake_case , **snake_case ) -> None:
"""simple docstring"""
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , snake_case , )
super().__init__(*snake_case , **snake_case )
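# Hedged migration sketch: the deprecated alias above adds no behavior of its
# own, so switching is a one-line change (assumes a transformers version where
# both names are still exported):
from transformers import PoolFormerFeatureExtractor, PoolFormerImageProcessor

_old = PoolFormerFeatureExtractor()  # triggers the deprecation warning defined above
_new = PoolFormerImageProcessor()    # preferred replacement, identical preprocessing
assert isinstance(_old, PoolFormerImageProcessor)  # the alias is a thin subclass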
| 607 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
print('''Generating primitive root of p''' )
while True:
SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ )
if pow(snake_case_ , 2 , snake_case_ ) == 1:
continue
if pow(snake_case_ , snake_case_ , snake_case_ ) == 1:
continue
return g
def A__ ( snake_case_ : int ):
print('''Generating prime p...''' )
SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
return public_key, private_key
def A__ ( snake_case_ : str , snake_case_ : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('''\nWARNING:''' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , '''w''' ) as fo:
fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
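# Hedged sketch of how such a key pair is used, in textbook ElGamal form and
# with tiny constants for readability (p and g below are illustrative; the
# script above additionally stores the *inverse* of g^d as part of its public
# key, so its companion cipher cancels that inverse during decryption):
import random
_p, _g = 2579, 2                   # small prime and generator (toy values)
_d = random.randrange(3, _p)       # private key
_h = pow(_g, _d, _p)               # public key component g^d mod p
_m = 1299                          # message, must satisfy 0 <= m < p
_k = random.randrange(3, _p)       # fresh ephemeral key for this message
_ca, _cb = pow(_g, _k, _p), (_m * pow(_h, _k, _p)) % _p  # ciphertext pair
_recovered = (_cb * pow(_ca, _p - 1 - _d, _p)) % _p      # c1^(-d) via Fermat's little theorem
assert _recovered == _m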
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ) ->None:
'''simple docstring'''
__a = row, column
__a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )]
def __str__( self ) ->str:
'''simple docstring'''
__a = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(lowerCamelCase , len(str(lowerCamelCase ) ) )
__a = F"""%{max_element_length}s"""
# Make string and return
def single_line(lowerCamelCase ) -> str:
nonlocal string_format_identifier
__a = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array )
return s
def __repr__( self ) ->str:
'''simple docstring'''
return str(self )
def __UpperCamelCase ( self , lowerCamelCase ) ->bool:
'''simple docstring'''
if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , lowerCamelCase ) ->Any:
'''simple docstring'''
assert self.validate_indicies(lowerCamelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , lowerCamelCase , lowerCamelCase ) ->None:
'''simple docstring'''
assert self.validate_indicies(lowerCamelCase )
__a = value
def __add__( self , lowerCamelCase ) ->Matrix:
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ) ->Matrix:
'''simple docstring'''
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , lowerCamelCase ) ->Matrix:
'''simple docstring'''
return self + (-another)
def __mul__( self , lowerCamelCase ) ->Matrix:
'''simple docstring'''
if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a = F"""Unsupported type given for another ({type(lowerCamelCase )})"""
raise TypeError(lowerCamelCase )
def __UpperCamelCase ( self ) ->Matrix:
'''simple docstring'''
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->Any:
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase )
assert self.row == self.column == u.row == v.row # A must be square; u, v must match its dimension
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
# a^(-1)
__a = Matrix(3, 3, 0 )
for i in range(3 ):
__a = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
__a = Matrix(3, 1, 0 )
__a = 1, 2, -3
__a = Matrix(3, 1, 0 )
__a = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(snake_case_, snake_case_ )}""" )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
import doctest
doctest.testmod()
testa() | 448 | from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
# If either of the conditions is true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]=13 , UpperCAmelCase__ : Dict=64 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : str=5 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Union[str, Any]=10 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : str=[1, 16, 4, 4] , UpperCAmelCase__ : int=None , ) ->List[str]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase_ = (self.image_size // 32) ** 2
UpperCAmelCase_ = num_patches + 1
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
UpperCAmelCase_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase__ , )
def lowerCAmelCase__ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ) ->List[str]:
UpperCAmelCase_ = ViTHybridModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCAmelCase_ = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ) ->str:
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = ViTHybridForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
UpperCAmelCase_ = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowerCAmelCase__ = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCAmelCase__ ( self : int ) ->int:
UpperCAmelCase_ = ViTHybridModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
pass
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(UpperCAmelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->str:
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(UpperCAmelCase__ )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=UpperCAmelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase_ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = ViTHybridModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def __lowerCamelCase ( ):
'''simple docstring'''
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
UpperCAmelCase_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase__ )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**UpperCAmelCase__ )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
UpperCAmelCase_ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
UpperCAmelCase_ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
UpperCAmelCase_ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=UpperCAmelCase__ , return_tensors='''pt''' )
UpperCAmelCase_ = model(**UpperCAmelCase__ )
UpperCAmelCase_ = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase_ = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
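# Hedged inference sketch mirroring the slow integration test above (assumes
# Hub access; the checkpoint name is the first entry of the archive list the
# test uses):
from PIL import Image
import torch
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

_ckpt = "google/vit-hybrid-base-bit-384"
_processor = ViTHybridImageProcessor.from_pretrained(_ckpt)
_model = ViTHybridForImageClassification.from_pretrained(_ckpt)
_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    _logits = _model(**_processor(images=_image, return_tensors="pt")).logits  # (1, 1000)
print(_model.config.id2label[_logits.argmax(-1).item()])  # expected: a cat class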
| 390 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ):
if rng is None:
SCREAMING_SNAKE_CASE__: Tuple= global_rng
SCREAMING_SNAKE_CASE__: List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= parent
SCREAMING_SNAKE_CASE__: Dict= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
SCREAMING_SNAKE_CASE__: Dict= max_seq_length
SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__: Dict= feature_size
SCREAMING_SNAKE_CASE__: str= padding_value
SCREAMING_SNAKE_CASE__: Dict= sampling_rate
SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
SCREAMING_SNAKE_CASE__: str= do_normalize
def UpperCamelCase_ ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__: int= [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = ASTFeatureExtractor
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> Any:
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: str= torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
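# Hedged usage sketch of the extractor under test (assumes transformers and
# torch; random noise stands in for real 16 kHz audio to stay self-contained):
import numpy as np
from transformers import ASTFeatureExtractor

_fe = ASTFeatureExtractor()                        # defaults: 16 kHz, 128 mel bins, 1024 frames
_wave = np.random.randn(16000).astype(np.float32)  # one second of noise
_values = _fe(_wave, sampling_rate=16000, return_tensors="pt").input_values
print(_values.shape)                               # expected torch.Size([1, 1024, 128])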
| 64 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__magic_name__: int = logging.get_logger(__name__)
class snake_case__ ( UpperCamelCase_ ):
lowercase__ : Any = '''AutoTokenizer'''
lowercase__ : Any = ['''tokenizer''']
lowercase__ : Tuple = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> str:
super().__init__(lowerCAmelCase__ )
__magic_name__ : Tuple = speaker_embeddings
@classmethod
def __magic_name__ ( cls , lowerCAmelCase__ , lowerCAmelCase__="speaker_embeddings_path.json" , **lowerCAmelCase__ ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
__magic_name__ : Dict = get_file_from_repo(
lowerCAmelCase__ , lowerCAmelCase__ , subfolder=kwargs.pop("""subfolder""" , lowerCAmelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , lowerCAmelCase__ ) , force_download=kwargs.pop("""force_download""" , lowerCAmelCase__ ) , proxies=kwargs.pop("""proxies""" , lowerCAmelCase__ ) , resume_download=kwargs.pop("""resume_download""" , lowerCAmelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , lowerCAmelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowerCAmelCase__ ) , revision=kwargs.pop("""revision""" , lowerCAmelCase__ ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__magic_name__ : Dict = None
else:
with open(lowerCAmelCase__ ) as speaker_embeddings_json:
__magic_name__ : List[Any] = json.load(lowerCAmelCase__ )
else:
__magic_name__ : Optional[Any] = None
__magic_name__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
return cls(tokenizer=lowerCAmelCase__ , speaker_embeddings=lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__="speaker_embeddings_path.json" , lowerCAmelCase__="speaker_embeddings" , lowerCAmelCase__ = False , **lowerCAmelCase__ , ) -> List[Any]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , """v2""" ) , exist_ok=lowerCAmelCase__ )
__magic_name__ : Tuple = {}
__magic_name__ : Optional[int] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__magic_name__ : Tuple = self._load_voice_preset(lowerCAmelCase__ )
__magic_name__ : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , lowerCAmelCase__ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase__ , )
__magic_name__ : List[Any] = os.path.join(lowerCAmelCase__ , F'{prompt_key}_{key}.npy' )
__magic_name__ : Dict = tmp_dict
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , """w""" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
super().save_pretrained(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__ ( self , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Union[str, Any] = self.speaker_embeddings[voice_preset]
__magic_name__ : Any = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__magic_name__ : List[Any] = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowerCAmelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , lowerCAmelCase__ ) , force_download=kwargs.pop("""force_download""" , lowerCAmelCase__ ) , proxies=kwargs.pop("""proxies""" , lowerCAmelCase__ ) , resume_download=kwargs.pop("""resume_download""" , lowerCAmelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , lowerCAmelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowerCAmelCase__ ) , revision=kwargs.pop("""revision""" , lowerCAmelCase__ ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
__magic_name__ : int = np.load(lowerCAmelCase__ )
return voice_preset_dict
def __magic_name__ ( self , lowerCAmelCase__ = None ) -> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="pt" , lowerCAmelCase__=2_56 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> List[Any]:
if voice_preset is not None and not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__magic_name__ : List[Any] = self._load_voice_preset(lowerCAmelCase__ )
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and not voice_preset.endswith(""".npz""" ):
__magic_name__ : Tuple = voice_preset + '''.npz'''
__magic_name__ : Union[str, Any] = np.load(lowerCAmelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
__magic_name__ : Any = self.tokenizer(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , padding="""max_length""" , max_length=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
if voice_preset is not None:
__magic_name__ : Dict = voice_preset
return encoded_text
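# Hedged usage sketch: this processor ships in transformers as BarkProcessor.
# The checkpoint and preset names below are assumptions (any Bark repo that
# bundles a speaker_embeddings_path.json should behave the same way):
from transformers import BarkProcessor

_processor = BarkProcessor.from_pretrained("suno/bark-small")
_inputs = _processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# _inputs should hold the tokenized text plus the loaded voice preset arrays
# (semantic/coarse/fine prompts) under a history-prompt entry.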
| 324 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
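# Hedged sketch of the lazy-import pattern used above, reduced to a single
# submodule. This belongs in a package's __init__.py; the package and
# attribute names are illustrative, not part of transformers:
import sys
from transformers.utils import _LazyModule

_import_structure = {"my_submodule": ["MyClass", "my_function"]}
# Replacing the module object defers importing my_submodule until MyClass or
# my_function is first accessed as an attribute of the package.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)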
| 64 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : str =torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
lowercase : Any =torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
lowercase : List[str] =[
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
lowercase : str ={
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowercase : Union[str, Any] =sd.pop(snake_case_ )
lowercase : int =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowercase : int =sd[key]
# We split QKV into separate Q, K, V
lowercase : Optional[Any] =key.replace('''.qkv_proj.''' , '''.q_proj.''' )
lowercase : Optional[int] =key.replace('''.qkv_proj.''' , '''.k_proj.''' )
lowercase : List[str] =key.replace('''.qkv_proj.''' , '''.v_proj.''' )
lowercase : Optional[int] =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowercase : List[str] =torch.split(snake_case_ , depth // 3 , dim=0 )
lowercase : List[Any] =q
lowercase : Any =k
lowercase : Optional[Any] =v
del sd[key]
return sd
@torch.no_grad()
def lowercase_ ( __A : Optional[int] , __A : Optional[int] , __A : Tuple=None ) -> Dict:
"""simple docstring"""
lowercase : List[str] =load_checkpoint(snake_case_ )
if config is not None:
lowercase : Any =OPTConfig.from_pretrained(snake_case_ )
else:
lowercase : Optional[int] =OPTConfig()
lowercase : Union[str, Any] =OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
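# Hedged toy demonstration of the QKV split performed by the script above: a
# fused projection of shape (3*d, d) is cut into three (d, d) blocks along
# dim 0 with torch.split (names here are illustrative):
import torch

_d = 4
_qkv_weight = torch.arange(3 * _d * _d, dtype=torch.float32).reshape(3 * _d, _d)
_q_w, _k_w, _v_w = torch.split(_qkv_weight, _qkv_weight.shape[0] // 3, dim=0)
assert _q_w.shape == _k_w.shape == _v_w.shape == (_d, _d)
# The script applies this split to every ".qkv_proj." key and writes the
# pieces back under ".q_proj.", ".k_proj." and ".v_proj." in the state dict.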
| 94 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if not len(snake_case_ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ):
SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
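# Hedged self-contained demo of the grid helper defined above (invoked as
# image_grid at its call site): paste equally sized tiles row-major onto one
# sheet. Solid-color tiles replace generated images so no model is needed:
from PIL import Image

_tiles = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue", "yellow")]
_rows, _cols = 2, 2
_w, _h = _tiles[0].size
_sheet = Image.new("RGB", (_cols * _w, _rows * _h))
for _i, _tile in enumerate(_tiles):
    _sheet.paste(_tile, box=(_i % _cols * _w, _i // _cols * _h))
_sheet.save("grid_demo.png")  # one 128x128 image holding all four tiles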
| 64 | 0 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
A_ : str =logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case : torch.nn.Module , snake_case : BnbQuantizationConfig , snake_case : Union[str, os.PathLike] = None , snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , snake_case : Optional[List[str]] = None , snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , snake_case : Optional[Union[str, os.PathLike]] = None , snake_case : bool = False , )-> Optional[Any]:
_lowerCamelCase = bnb_quantization_config.load_in_abit
_lowerCamelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
_lowerCamelCase = []
# custom device map
if isinstance(snake_case_ , snake_case_ ) and len(device_map.keys() ) > 1:
_lowerCamelCase = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_lowerCamelCase = get_keys_to_not_convert(snake_case_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case_ )
_lowerCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_lowerCamelCase = []
_lowerCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case_ )
# compatibility with peft
_lowerCamelCase = load_in_abit
_lowerCamelCase = load_in_abit
_lowerCamelCase = get_parameter_device(snake_case_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_lowerCamelCase = replace_with_bnb_layers(snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
# convert param to the right dtype
_lowerCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_lowerCamelCase = name.replace('.weight' , '' ).replace('.bias' , '' )
_lowerCamelCase = getattr(snake_case_ , snake_case_ , snake_case_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(snake_case_ ):
param.to(snake_case_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization. '
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
_lowerCamelCase = replace_with_bnb_layers(
snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ )
_lowerCamelCase = get_quantized_model_device_map(
snake_case_ , snake_case_ , snake_case_ , max_memory=snake_case_ , no_split_module_classes=snake_case_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_lowerCamelCase = True
_lowerCamelCase = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
snake_case_ , snake_case_ , snake_case_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case_ , offload_state_dict=snake_case_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case_ , device_map=snake_case_ , offload_dir=snake_case_ )
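# Hedged usage sketch of this entry point as exposed by accelerate (requires a
# CUDA GPU, bitsandbytes, and a real checkpoint folder; the tiny module and
# the weights path below are placeholders):
import torch.nn as nn
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

class _ToyNet(nn.Module):  # stand-in architecture for the sketch
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(64, 64)

with init_empty_weights():
    _empty_model = _ToyNet()
_bnb_cfg = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
_quantized = load_and_quantize_model(
    _empty_model,
    bnb_quantization_config=_bnb_cfg,
    weights_location="path/to/checkpoint_dir",  # placeholder path
    device_map="auto",
)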
def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple , snake_case : Union[str, Any] , snake_case : str=None , snake_case : List[str]=None , snake_case : int=None )-> List[Any]:
if device_map is None:
if torch.cuda.is_available():
_lowerCamelCase = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(snake_case_ , snake_case_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_lowerCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_lowerCamelCase = {}
_lowerCamelCase = special_dtypes
_lowerCamelCase = no_split_module_classes
_lowerCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_lowerCamelCase = get_balanced_memory(
snake_case_ , low_zero=(device_map == 'balanced_low_0') , max_memory=snake_case_ , **snake_case_ , )
_lowerCamelCase = max_memory
_lowerCamelCase = infer_auto_device_map(snake_case_ , **snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
# check that we don't have any quantized module on the CPU
_lowerCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_lowerCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
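# Illustration of the two `device_map` forms the function above accepts: one of
# the inference strategy strings, or an explicit module-to-device dict. The
# per-module keys below are hypothetical names, not taken from a real model.
device_map_strategy = "auto"  # resolved through get_balanced_memory / infer_auto_device_map above
device_map_explicit = {"": 0}  # pin the whole model to GPU 0
# finer-grained (hypothetical names): {"transformer.h.0": 0, "lm_head": "cpu"}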
def SCREAMING_SNAKE_CASE_ ( snake_case : Dict , snake_case : Any , snake_case : List[Any]=None , snake_case : Dict=None )-> str:
if modules_to_not_convert is None:
_lowerCamelCase = []
_lowerCamelCase = _replace_with_bnb_layers(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] , snake_case : List[Any] , snake_case : Optional[int]=None , snake_case : List[str]=None , )-> Tuple:
_lowerCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
_lowerCamelCase = []
current_key_name.append(snake_case_ )
if isinstance(snake_case_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_lowerCamelCase = '''.'''.join(snake_case_ )
_lowerCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_lowerCamelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear`` module
if bnb_quantization_config.load_in_8bit:
_lowerCamelCase = bnb.nn.Linear8bitLt(
module.in_features , module.out_features , module.bias is not None , has_fp16_weights=snake_case_ , threshold=bnb_quantization_config.llm_int8_threshold , )
elif bnb_quantization_config.load_in_4bit:
_lowerCamelCase = bnb.nn.Linear4bit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_lowerCamelCase = module.weight.data
if module.bias is not None:
_lowerCamelCase = module.bias.data
bnb_module.requires_grad_(snake_case_ )
setattr(snake_case_ , snake_case_ , snake_case_ )
_lowerCamelCase = True
if len(list(module.children() ) ) > 0:
_lowerCamelCase = _replace_with_bnb_layers(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
_lowerCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
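# The recursion above is the usual named_children walk-and-swap pattern. Below is
# a self-contained, torch-only sketch of the same idea, swapping nn.Linear for a
# stand-in class instead of a bitsandbytes layer (illustrative only):
import torch.nn as nn

class StubLinear(nn.Linear):
    """Stand-in for a quantized linear layer in this sketch."""

def swap_linears(module: nn.Module) -> bool:
    replaced = False
    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and not isinstance(child, StubLinear):
            new_child = StubLinear(child.in_features, child.out_features, child.bias is not None)
            setattr(module, name, new_child)
            replaced = True
        else:
            # recurse into containers, accumulating whether anything was swapped
            replaced = swap_linears(child) or replaced
    return replaced

toy = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.Linear(8, 2)))
assert swap_linears(toy) and isinstance(toy[0], StubLinear) and isinstance(toy[1][0], StubLinear)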
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] )-> int:
# Create a copy of the model
with init_empty_weights():
_lowerCamelCase = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager
_lowerCamelCase = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_ , snake_case_ ):
_lowerCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_lowerCamelCase = sum(snake_case_ , [] )
_lowerCamelCase = len(snake_case_ ) > 0
# Check if it is a base model
_lowerCamelCase = False
if hasattr(snake_case_ , 'base_model_prefix' ):
_lowerCamelCase = not hasattr(snake_case_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_lowerCamelCase = list(model.named_children() )
_lowerCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
_lowerCamelCase = set(snake_case_ ) - set(snake_case_ )
_lowerCamelCase = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
_lowerCamelCase = ['''.weight''', '''.bias''']
_lowerCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_lowerCamelCase = name.replace(snake_case_ , '' )
filtered_module_names.append(snake_case_ )
return filtered_module_names
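# Minimal, self-contained illustration of the suffix-stripping step above: the
# collected module names lose their ".weight"/".bias" endings before being returned.
names = ["lm_head.weight", "decoder.bias", "embed_tokens.weight"]
stripped = []
for n in names:
    for suffix in (".weight", ".bias"):
        if suffix in n:
            n = n.replace(suffix, "")
    stripped.append(n)
assert stripped == ["lm_head", "decoder", "embed_tokens"]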
def SCREAMING_SNAKE_CASE_ ( snake_case : Any )-> int:
for m in model.modules():
if isinstance(snake_case_ , bnb.nn.Linear4bit ):
return True
return False
def SCREAMING_SNAKE_CASE_ ( snake_case : nn.Module )-> Tuple:
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple , snake_case : List[str] , snake_case : int , snake_case : Dict , snake_case : Tuple , snake_case : Any , snake_case : str )-> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fp16_statistics is None:
set_module_tensor_to_device(snake_case_ , snake_case_ , 0 , dtype=snake_case_ , value=snake_case_ )
_lowerCamelCase = param_name
_lowerCamelCase = model
if "." in tensor_name:
_lowerCamelCase = tensor_name.split('.' )
for split in splits[:-1]:
_lowerCamelCase = getattr(snake_case_ , snake_case_ )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
_lowerCamelCase = new_module
_lowerCamelCase = splits[-1]
# offload weights
_lowerCamelCase = False
offload_weight(module._parameters[tensor_name] , snake_case_ , snake_case_ , index=snake_case_ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , snake_case_ , index=snake_case_ , )
else:
offload_weight(snake_case_ , snake_case_ , snake_case_ , index=snake_case_ )
offload_weight(snake_case_ , param_name.replace('weight' , 'SCB' ) , snake_case_ , index=snake_case_ )
set_module_tensor_to_device(snake_case_ , snake_case_ , 'meta' , dtype=snake_case_ , value=torch.empty(*param.size() ) )
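# Sketch of the offload_weight helper used above (assumes it is importable from
# accelerate.utils, as in the module this code comes from): each call writes the
# tensor under the offload folder and records dtype/shape metadata in the index.
# The tensor and parameter name here are arbitrary examples.
import tempfile
import torch
from accelerate.utils import offload_weight

with tempfile.TemporaryDirectory() as folder:
    index = offload_weight(torch.ones(2, 2), "layer.weight", folder, index={})
    print(index)  # expected metadata shape: {"layer.weight": {"dtype": ..., "shape": [2, 2]}}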
| 650 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
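# To make the search contract of the Aho-Corasick automaton above concrete:
# searching returns a dict that maps each matched keyword to the start indices of
# its occurrences. The same result, computed naively for a classic example:
text, keywords = "ahishers", ["he", "she", "his", "hers"]
occurrences = {kw: [i for i in range(len(text)) if text.startswith(kw, i)] for kw in keywords}
assert occurrences == {"he": [4], "she": [3], "his": [1], "hers": [4]}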
| 64 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int ) -> Optional[int]:
if not isinstance(snake_case_ , snake_case_ ):
raise TypeError('Input value must be an \'int\' type' )
__A : Tuple = 0
while number:
position += 1
number >>= 1
return position
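# The loop above counts how many right shifts it takes to clear the number, which
# for positive integers is exactly int.bit_length(); a quick self-check:
for n in (1, 2, 5, 1024):
    position, m = 0, n
    while m:
        position += 1
        m >>= 1
    assert position == n.bit_length()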
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 | import numpy as np
def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) )
SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE__: int= ya
SCREAMING_SNAKE_CASE__: Tuple= xa
for k in range(snake_case_ ):
SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] )
SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka )
SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka )
SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
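# Self-contained sanity check of the classical fourth-order Runge-Kutta update the
# function above implements: integrating y' = y from x = 0 to 1 with h = 0.01
# should land within about 1e-9 of e.
import math

h, x, y = 0.01, 0.0, 1.0
f = lambda x, y: y
for _ in range(100):
    k1 = f(x, y)
    k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(x + h, y + h * k3)
    y += (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
    x += h
assert abs(y - math.e) < 1e-8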
| 64 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
lowercase_ : Tuple = 42
# setable values
lowercase_ : int = 42
lowercase_ : Dict = 42
lowercase_ : str = None
@classmethod
def A__ ( cls : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
return cls(common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase )
@dataclass
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ):
lowercase_ : str = 42
class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ : Optional[int] = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ : Dict = 42
@property
def A__ ( self : Any ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[str] , __lowerCamelCase : Tuple = 10_00 , __lowerCamelCase : Dict = 0.0001 , __lowerCamelCase : str = 0.02 , __lowerCamelCase : Optional[int] = "linear" , __lowerCamelCase : int = None , __lowerCamelCase : Optional[Any] = "fixed_small" , __lowerCamelCase : int = True , __lowerCamelCase : List[str] = "epsilon" , __lowerCamelCase : str = jnp.float32 , ):
"""simple docstring"""
lowerCAmelCase__ = dtype
def A__ ( self : str , __lowerCamelCase : Dict = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase__ = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase , init_noise_sigma=__lowerCamelCase , timesteps=__lowerCamelCase , )
def A__ ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] = None ):
"""simple docstring"""
return sample
def A__ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any = () ):
"""simple docstring"""
lowerCAmelCase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase__ = (jnp.arange(0 , __lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase , )
def A__ ( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None ):
"""simple docstring"""
lowerCAmelCase__ = state.common.alphas_cumprod[t]
lowerCAmelCase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase__ = jnp.clip(__lowerCamelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase__ = jnp.log(jnp.clip(__lowerCamelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase__ = variance
lowerCAmelCase__ = state.common.betas[t]
lowerCAmelCase__ = (predicted_variance + 1) / 2
lowerCAmelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def A__ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] = None , __lowerCamelCase : int = True , ):
"""simple docstring"""
lowerCAmelCase__ = timestep
if key is None:
lowerCAmelCase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase__ = jnp.split(__lowerCamelCase , sample.shape[1] , axis=1 )
else:
lowerCAmelCase__ = None
# 1. compute alphas, betas
lowerCAmelCase__ = state.common.alphas_cumprod[t]
lowerCAmelCase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase__ = 1 - alpha_prod_t
lowerCAmelCase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
'''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase__ = jnp.clip(__lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase__ = jax.random.split(__lowerCamelCase , num=1 )
lowerCAmelCase__ = jax.random.normal(__lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__lowerCamelCase , __lowerCamelCase , predicted_variance=__lowerCamelCase ) ** 0.5) * noise
lowerCAmelCase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase , state=__lowerCamelCase )
def A__ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , ):
"""simple docstring"""
return add_noise_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def A__ ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , ):
"""simple docstring"""
return get_velocity_common(state.common , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __len__( self : List[str] ):
"""simple docstring"""
return self.config.num_train_timesteps
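# The class above corresponds to diffusers' FlaxDDPMScheduler. A minimal driving
# sketch (assumes jax/flax and diffusers are installed) showing the
# create-state / set-timesteps flow its methods implement:
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
print(state.timesteps[:3])  # first few (descending) timesteps of the 50-step schedule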
| 615 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
self.assertIsInstance(lowerCAmelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
self.assertIsInstance(lowerCAmelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
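# Quick demonstration of the helper under test (assumes diffusers is installed):
# get_activation maps a string name to the corresponding torch module, as the
# assertions above verify.
import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")
print(type(act).__name__)              # GELU
print(act(torch.tensor(20.0)).item())  # ~20.0: GELU is near-identity for large positive inputs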
| 64 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a :Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _a ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[Any] = XGLMTokenizer
_lowerCamelCase : List[Any] = XGLMTokenizerFast
_lowerCamelCase : Dict = True
_lowerCamelCase : int = True
def __A ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : Dict ):
A_ = '''<pad>'''
A_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def __A ( self : int ):
A_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(len(UpperCAmelCase ) , 1008 )
def __A ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self : str ):
A_ = XGLMTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
A_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
A_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __A ( self : Any ):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def __A ( self : List[str] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase , f.name )
A_ = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase )
A_ = pickle.dumps(UpperCAmelCase )
pickle.loads(UpperCAmelCase )
def __A ( self : Dict ):
if not self.test_rust_tokenizer:
return
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = '''I was born in 92000, and this is falsé.'''
A_ = tokenizer.tokenize(UpperCAmelCase )
A_ = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Optional[Any] ):
A_ = '''Hello World!'''
A_ = [2, 31227, 4447, 35]
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
@slow
def __A ( self : Tuple ):
A_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
A_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCAmelCase , self.big_tokenizer.encode(UpperCAmelCase ) )
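# Round-trip sketch of the tokenizer behavior the slow tests above pin down
# (requires network access to fetch the checkpoint); note the leading special
# id 2 that encode() prepends, visible in the expected lists above.
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tok.encode("Hello World!")  # e.g. [2, 31227, 4447, 35] per the test above
print(tok.decode(ids, skip_special_tokens=True))  # should read "Hello World!" again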
@slow
def __A ( self : Any ):
# fmt: off
A_ = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="facebook/xglm-564M" , padding=UpperCAmelCase , ) | 86 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
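# Worked example of the layout the class above builds (leaves at st[N..2N-1],
# parent p combining children 2p and 2p+1) and of what query(1, 2) returns for
# arr = [1, 2, 3, 4] with fn = min; re-computed here self-contained for clarity:
arr, fn = [1, 2, 3, 4], min
N = len(arr)
st = [None] * N + arr                  # st[4..7] hold the leaves
for p in range(N - 1, 0, -1):
    st[p] = fn(st[2 * p], st[2 * p + 1])
assert st[1] == 1 and st[2] == 1 and st[3] == 3
# query(1, 2) visits leaves st[5] and st[6]: fn(2, 3) == 2 (range is inclusive over arr[1..2])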
if __name__ == "__main__":
from functools import reduce
lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ : int = SegmentTree(test_array, min)
lowercase_ : Optional[int] = SegmentTree(test_array, max)
lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ):
for i in range(len(snake_case_ ) ):
for j in range(snake_case_ , len(snake_case_ ) ):
SCREAMING_SNAKE_CASE__: Any= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: int= reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )
test_all_segments()
for index, value in test_updates.items():
lowercase_ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _UpperCamelCase( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _UpperCamelCase( self : Optional[int] ):
a__ : Dict = ort.SessionOptions()
a__ : List[str] = False
return options
def _UpperCamelCase( self : Dict ):
a__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
a__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
a__ : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
a__ : Tuple = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Dict = '''A red cat sitting on a park bench'''
a__ : Optional[Any] = np.random.RandomState(0 )
a__ : Any = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCamelCase__ , output_type="np" , )
a__ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 37 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = StableDiffusionControlNetImg2ImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
__a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: int= UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= 2
SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uint8(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE__: Tuple= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase_ ( self ) -> Tuple:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> str:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = StableDiffusionControlNetImg2ImgPipeline
__a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: int= UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(lowerCAmelCase ):
if isinstance(lowerCAmelCase , torch.nn.Conv2d ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE__: int= {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= 2
SCREAMING_SNAKE_CASE__: Tuple= [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
]
SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uint8(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
SCREAMING_SNAKE_CASE__: int= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= 10.0
SCREAMING_SNAKE_CASE__: Any= 4
SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= steps
SCREAMING_SNAKE_CASE__: int= scale
SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= steps
SCREAMING_SNAKE_CASE__: List[Any]= scale
SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= steps
SCREAMING_SNAKE_CASE__: List[Any]= scale
SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= steps
SCREAMING_SNAKE_CASE__: int= scale
SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def UpperCamelCase_ ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(lowerCAmelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
SCREAMING_SNAKE_CASE__: List[str]= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: List[Any]= load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE__: str= load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 0 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class A :
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : str=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Tuple=99 , __lowerCAmelCase : str=32 , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : Dict="gelu" , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=50 , __lowerCAmelCase : Optional[Any]=0.0_2 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=None , ) -> Optional[int]:
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = use_labels
A__ = scope
def a_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def a_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def a_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
(
A__
) = self.prepare_config_and_inputs()
A__ = True
A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , **__lowerCAmelCase : Dict , ) -> List[str]:
"""simple docstring"""
A__ = BertGenerationEncoder(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
A__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , **__lowerCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
A__ = True
A__ = BertGenerationEncoder(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
A__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , **__lowerCAmelCase : Dict , ) -> Any:
"""simple docstring"""
A__ = True
A__ = True
A__ = BertGenerationDecoder(config=__lowerCAmelCase ).to(__lowerCAmelCase ).eval()
# first forward pass
A__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase , )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat([input_mask, next_mask] , dim=-1 )
A__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
A__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )['''hidden_states'''][0]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) )
def a_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , *__lowerCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
A__ = BertGenerationDecoder(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Tuple ) -> Any:
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__lowerCamelCase : List[str] = (BertGenerationDecoder,) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def a_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
A__ = BertGenerationEncoderTester(self )
A__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def a_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Any ) -> Any:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
A__ = '''bert'''
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Dict ) -> List[str]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__lowerCAmelCase )
def a_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowerCAmelCase )
def a_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
(
A__
) = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
def a_ ( self : List[str] ) -> Dict:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
@slow
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
class A (unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
A__ = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A__ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
A__ = model(__lowerCAmelCase )[0]
A__ = torch.Size([1, 8, 10_24] )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@require_torch
class A (unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self : int ) -> Any:
"""simple docstring"""
A__ = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
A__ = torch.tensor([[1_01, 75_92, 10_10, 20_26, 38_99, 20_03, 1_01_40, 1_02]] )
with torch.no_grad():
A__ = model(__lowerCAmelCase )[0]
A__ = torch.Size([1, 8, 5_03_58] )
self.assertEqual(output.shape , __lowerCAmelCase )
A__ = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 176 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
__a = 42
# setable values
__a = 42
__a = 42
__a = None
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__a = [e.name for e in FlaxKarrasDiffusionSchedulers]
__a = 42
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return True
@register_to_config
def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.float32 , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= dtype
def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
if common is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
return sample
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
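For reference, the base value computed before the branches is the DDPM posterior variance, formula (7) of Ho et al. (2020), where `alpha_prod_t` is $\bar{\alpha}_t$ and `state.common.betas[t]` is $\beta_t$:

$$\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t$$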
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self) -> int:
        return self.config.num_train_timesteps
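A minimal sketch of how this scheduler is typically driven. Here `model` is a hypothetical stand-in for a trained Flax noise-prediction network, and the sample shape is arbitrary:

import jax
import jax.numpy as jnp

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma
for t in state.timesteps:
    model_output = model(sample, t)  # hypothetical epsilon-prediction network
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)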
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
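Outside the test harness, the same benchmarking API can be driven directly. A minimal sketch using only the arguments exercised above:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)
print(results.memory_inference_result)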
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
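The inner loop implements the standard Catalan convolution: with $C_0 = C_1 = 1$,

$$C_i = \sum_{j=0}^{i-1} C_j \, C_{i-1-j}.$$

For example, $C_3 = C_0 C_2 + C_1 C_1 + C_2 C_0 = 2 + 1 + 2 = 5$.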
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset


from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
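The guard above keeps the subpackage importable when optional dependencies are missing: if torch or transformers>=4.25.0 is absent, dummy placeholder classes that raise on use are exposed instead of the real pipelines. A minimal sketch of the same idiom (names hypothetical, not part of the original module):

try:
    import torch  # noqa: F401

    _torch_available = True
except ImportError:
    _torch_available = False

if not _torch_available:

    class UnCLIPPipeline:  # hypothetical stand-in mirroring the dummy objects
        def __init__(self, *args, **kwargs):
            raise ImportError("UnCLIPPipeline requires torch and transformers>=4.25.0")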
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
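The `.qkv_proj.` branch above relies on `torch.split` dividing the fused projection into three equal row blocks. In isolation, a toy sketch with a hypothetical depth:

import torch

depth = 12  # hypothetical fused projection size; must be divisible by 3
qkv = torch.arange(depth * 4, dtype=torch.float32).reshape(depth, 4)
k, v, q = torch.split(qkv, depth // 3, dim=0)
assert q.shape == k.shape == v.shape == (depth // 3, 4)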
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowercase_ : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)