code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: path already exceeds the target, or even adding every remaining
    # number cannot reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
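# Note (added for clarity, not in the source): for the sample input above, the
# only subsets of [3, 34, 4, 12, 5, 2] summing to 9 are [3, 4, 2] and [4, 5],
# so the script prints, in depth-first order:
#   [3, 4, 2] [4, 5]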
| 64 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
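# Minimal sketch (added for illustration, not part of the conversion script) of
# the dotted-key traversal set_recursively performs: nn.Module.__getattr__
# resolves submodule names, including the "0", "1", ... child names that
# containers such as nn.Sequential use.
#
#     from torch import nn
#     model = nn.Sequential(nn.Linear(4, 4))
#     pointer = model
#     for attribute in "0.weight".split("."):
#         pointer = getattr(pointer, attribute)
#     assert pointer.shape == (4, 4)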
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
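# Illustration (added, not in the source) of the "*" substitution above: a
# fairseq key such as "w2v_encoder.w2v_model.encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" MAPPING entry, and the layer index recovered
# from the name replaces the "*" placeholder:
#
#     name = "w2v_encoder.w2v_model.encoder.layers.3.self_attn.k_proj.weight"
#     layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]      # "3"
#     mapped_key = "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#     # -> "encoder.layers.3.attention.k_proj"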
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
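# Illustration (added; the file contents below are hypothetical, fairseq-style
# "word count" lines):
#
#     dict.ltr.txt:   hello 42
#                     world 17
#
#     create_vocab_dict("dict.ltr.txt")
#     # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}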
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
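# For reference, an equivalent direct call (added here; every path below is a
# placeholder, not a value from the source):
#
#     convert_wav2vec2_checkpoint(
#         "checkpoint.pt",
#         "./hf_dump",
#         "dict.ltr.txt",
#         encoder_config_path="facebook/wav2vec2-large-lv60",
#         decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
#         vocab_size=10224,
#         num_decoder_layers=7,
#     )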
| 64 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    fire.Fire(save_len_file)
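# Since the function is handed to fire.Fire, the module doubles as a CLI; a
# hypothetical invocation (model name and data path are placeholders):
#
#     python save_len_file.py t5-small /path/to/seq2seq_data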
| 371 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
    import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 213 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 327 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check substring divisibility of a 0-9 pandigital number given as a digit tuple."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
    print(f"{solution() = }")
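# Sanity check (added, not in the source): 1406357289 is the classic
# substring-divisible pandigital, so its digit tuple should pass:
#
#     assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))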
| 327 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, bos_token=chr(CLS), eos_token=chr(SEP), sep_token=chr(SEP), cls_token=chr(CLS), pad_token=chr(PAD), mask_token=chr(MASK), add_prefix_space=False, model_max_length=2048, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # CANINE operates directly on codepoints, so there is no vocabulary file to save.
        return ()
| 284 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: zero-weight edges go to the front of the deque,
                # one-weight edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
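# Usage sketch (added; names assume the classes above): a zero-weight edge
# followed by a one-weight edge gives a shortest path of length 1.
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     assert g.get_shortest_path(0, 2) == 1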
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 284 | 1 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 194 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
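# Usage sketch (added; `image` is assumed to be any PIL image, and the class
# name follows the reconstruction above):
#
#     processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#     batch = processor(images=image, return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])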
| 244 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 371 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # `normalizers` comes from the `tokenizers` library import at the top of the file
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
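# Minimal usage sketch of the helpers above (assumes network access to the Hub
# for the `distilbert-base-uncased` checkpoint): a pair is encoded as
# [CLS] A [SEP] B [SEP], with segment 0 covering [CLS] A [SEP] and segment 1 the rest.
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    ids_a = tok.encode("hello", add_special_tokens=False)
    ids_b = tok.encode("world", add_special_tokens=False)
    pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)
    types = tok.create_token_type_ids_from_sequences(ids_a, ids_b)
    assert len(pair) == len(types) == len(ids_a) + len(ids_b) + 3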
| 309 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    """Render and save the README.md model card for one allenai wmt16 checkpoint."""
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
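# Illustration (not part of the original script): the BibTeX block above doubles
# its braces because it lives inside an f-string, where `{{` and `}}` render as
# literal braces while single braces interpolate.
if __name__ == "__main__":
    name = "wmt16-en-de-dist-12-1"
    print(f"@misc{{{name}}}")  # -> @misc{wmt16-en-de-dist-12-1}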
| 94 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("ONNX compliance broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Build a fixed two-example batch (sequence length 25) for the integration tests."""
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
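# Sketch (not part of the test file): the per-element loop in
# TFLayoutLMModelTester.prepare_config_and_inputs that makes every bbox legal
# (x0 <= x1 and y0 <= y1 via swapping) is equivalent to this vectorized form,
# reusing the numpy import already present at the top of the file.
def make_bbox_legal(bbox):
    # bbox: integer array of shape (batch, seq_len, 4) laid out as (x0, y0, x1, y1)
    low = np.minimum(bbox[..., :2], bbox[..., 2:])
    high = np.maximum(bbox[..., :2], bbox[..., 2:])
    return np.concatenate([low, high], axis=-1)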
| 94 | 1 |
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation_search over the window [left, right]."""
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # mirror the iterative version: shrink the window to [point, left]
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
snake_case : str = 0
if debug == 1:
snake_case : Union[str, Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
snake_case : Union[str, Any] = 67
snake_case : Union[str, Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print('''Not found''')
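# Worked examples for the two search variants (doctest style):
#
# >>> interpolation_search([1, 5, 9, 13, 17], 13)
# 3
# >>> interpolation_search([1, 5, 9, 13, 17], 4) is None
# True
# >>> interpolation_search_by_recursion([1, 5, 9, 13, 17], 17, 0, 4)
# 4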
| 109 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
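# Sketch (not part of the test; assumes jax and flax are installed and, on a
# single host, that device_count equals local_device_count): the
# replicate/shard pattern above prepares pmap-style data parallelism —
# `shard` reshapes a batch onto a leading device axis while `replicate`
# copies the parameter pytree once per device.
if __name__ == "__main__":
    import numpy as np

    n = jax.device_count()
    batch = np.zeros((n * 2, 8), dtype=np.float32)
    sharded = shard(batch)  # (num_devices, per_device_batch, features)
    assert sharded.shape == (n, 2, 8)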
| 109 | 1 |
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
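# Usage sketch for the helpers above (note: the helper names were
# reconstructed from their bodies and may differ from the original script):
if __name__ == "__main__":
    import torch.nn as nn

    layer = nn.Linear(4, 2)
    freeze_params(layer)
    assert all(not p.requires_grad for p in layer.parameters())
    print(get_device(), get_timestamp())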
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
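# Illustration (not part of the test suite): the shortest-edge rule encoded in
# get_expected_values above, as a hypothetical standalone helper with a
# worked example.
def _shortest_edge_resize(h, w, shortest_edge):
    # scale (h, w) so the shorter side equals `shortest_edge`, keeping the aspect ratio
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _shortest_edge_resize(400, 200, 18) == (36, 18)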
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processings
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 356 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = (3, 32, 1_28)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowercase ) + "\n" )
lowercase__ = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 1_28},
}
lowercase__ = os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[Any] , **_lowercase :str ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase ( self :List[Any] , **_lowercase :List[str] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_lowercase , return_tensors="np" )
lowercase__ = processor(images=_lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "test"
lowercase__ = processor(text=_lowercase )
lowercase__ = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = "test"
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(_lowercase ):
processor()
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(_lowercase )
lowercase__ = tokenizer.batch_decode(_lowercase )
lowercase__ = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase )
lowercase__ = torch.randn(1 , 27 , 38 )
lowercase__ = torch.randn(1 , 27 , 5_02_57 )
lowercase__ = torch.randn(1 , 27 , 3_05_22 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 201 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def __a ( self ) -> str:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a : Union[str, Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a : Optional[int] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(lowerCAmelCase__ )
BertModel.from_pretrained(lowerCAmelCase__ )
BertTokenizer.from_pretrained(lowerCAmelCase__ )
pipeline(task="fill-mask" , model=lowerCAmelCase__ )
# baseline - just load from_pretrained with normal network
a : Optional[Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a : Any = "1"
a : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> Optional[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a : str = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a : List[str] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a : List[Any] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(lowerCAmelCase__ )
BertModel.from_pretrained(lowerCAmelCase__ )
BertTokenizer.from_pretrained(lowerCAmelCase__ )
pipeline(task="fill-mask" , model=lowerCAmelCase__ )
# baseline - just load from_pretrained with normal network
a : int = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a : str = self.get_env()
a : int = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> Tuple:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a : Optional[int] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
a : Any = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
a : List[str] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
a : int = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a : Dict = self.get_env()
a : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
a : Tuple = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a : List[Any] = "1"
a : Optional[int] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Union[str, Any] = "\nfrom transformers import pipeline\n "
a : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a : List[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a : List[Any] = self.get_env()
a : Optional[int] = "1"
a : Tuple = [sys.executable, "-c", "\n".join([load, mock, run] )]
a : List[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def __a ( self ) -> Tuple:
a : Optional[int] = "\nfrom transformers import AutoModel\n "
a : int = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a : Optional[Any] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a : List[str] = self.get_env()
a : Optional[int] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a : Union[str, Any] = "1"
a : Dict = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 105 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # [s] token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
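# Illustration (not part of the processor; assumes torch is available):
# _decode_helper scores a greedy decode with the cumulative product of the
# per-step max probabilities, i.e. the joint probability of the chosen sequence.
if __name__ == "__main__":
    step_probs = torch.tensor([0.9, 0.8, 0.99])
    confidence = step_probs.cumprod(dim=0)[-1]
    assert abs(confidence.item() - 0.9 * 0.8 * 0.99) < 1e-6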
| 140 | 0 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
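# Usage sketch (assumes the image-processing dependencies are installed): the
# shim above only emits a FutureWarning and otherwise behaves exactly like
# LayoutLMv2ImageProcessor.
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        LayoutLMv2FeatureExtractor()
    assert any("deprecated" in str(w.message) for w in caught)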
| 103 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=4, ) -> Dict:
UpperCamelCase : Optional[int] = parent
UpperCamelCase : Optional[int] = batch_size
UpperCamelCase : Optional[int] = seq_length
UpperCamelCase : Any = is_training
UpperCamelCase : Tuple = use_attention_mask
UpperCamelCase : Dict = use_token_type_ids
UpperCamelCase : Union[str, Any] = use_labels
UpperCamelCase : Any = vocab_size
UpperCamelCase : Any = hidden_size
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Optional[int] = num_attention_heads
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Any = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : int = type_vocab_size
UpperCamelCase : Optional[int] = type_sequence_label_size
UpperCamelCase : str = initializer_range
UpperCamelCase : Tuple = num_choices
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase : Dict = None
if self.use_attention_mask:
UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : int = None
if self.use_token_type_ids:
UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase : Optional[int] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=SCREAMING_SNAKE_CASE_, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def snake_case_ ( self ) -> int:
UpperCamelCase : List[str] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Any = config_and_inputs
UpperCamelCase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : int = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = config_and_inputs
UpperCamelCase : Dict = True
UpperCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : int = FlaxRobertaPreLayerNormModelTester(self )
@slow
def snake_case_ ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
UpperCamelCase : List[str] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Union[str, Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase : List[Any] = [1, 11, 5_0265]
self.assertEqual(list(output.shape ), SCREAMING_SNAKE_CASE_ )
# compare the actual values for a slice.
UpperCamelCase : Optional[int] = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
@slow
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.intaa )
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
UpperCamelCase : Any = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], SCREAMING_SNAKE_CASE_, atol=1e-4 ) )
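# Numpy stand-ins for the ids_tensor / random_attention_mask fixture helpers the
# tester above depends on. The real helpers live in the transformers test utils;
# treat these as illustrative approximations of their behavior.
import numpy as np

_rng = np.random.default_rng(0)

def _ids_tensor(shape, vocab_size):
    return _rng.integers(0, vocab_size, size=shape, dtype=np.int32)  # ids in [0, vocab_size)

def _random_attention_mask(shape):
    mask = _rng.integers(0, 2, size=shape, dtype=np.int32)
    mask[:, -1] = 1  # make sure at least one token is attended per row
    return mask

print(_ids_tensor((13, 7), 99).shape, _random_attention_mask((13, 7))[:, -1].min())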
| 103 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase__ = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
# Initialise PyTorch model
_lowerCamelCase : Dict = XLNetConfig.from_json_file(lowercase__ )
_lowerCamelCase : int = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
_lowerCamelCase : int = finetuning_task
_lowerCamelCase : str = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCamelCase : Dict = XLNetForSequenceClassification(lowercase__ )
elif "squad" in finetuning_task:
_lowerCamelCase : Any = finetuning_task
_lowerCamelCase : Dict = XLNetForQuestionAnswering(lowercase__ )
else:
_lowerCamelCase : Tuple = XLNetLMHeadModel(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
_lowerCamelCase : Tuple = os.path.join(lowercase__ , lowercase__ )
_lowerCamelCase : Any = os.path.join(lowercase__ , lowercase__ )
print(f'''Save PyTorch model to {os.path.abspath(lowercase__ )}''' )
torch.save(model.state_dict() , lowercase__ )
print(f'''Save configuration file to {os.path.abspath(lowercase__ )}''' )
with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
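# A hypothetical invocation of the converter above; every path below is a
# placeholder, and the script name is assumed from the function it wraps:
#
#   python convert_xlnet_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path       /path/to/xlnet_model.ckpt \
#       --xlnet_config_file        /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path /path/to/out \
#       --finetuning_task sts-b
#
# With --finetuning_task sts-b, GLUE_TASKS_NUM_LABELS maps the task to a single
# label, so a regression-style XLNetForSequenceClassification head is built.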
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
lowercase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 96 |
from pathlib import Path
import fire
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int ):
"""simple docstring"""
lowerCAmelCase_ = Path(__lowerCAmelCase )
lowerCAmelCase_ = Path(__lowerCAmelCase )
dest_dir.mkdir(exist_ok=__lowerCAmelCase )
for path in src_dir.iterdir():
lowerCAmelCase_ = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowerCAmelCase_ = dest_dir.joinpath(path.name )
print(__lowerCAmelCase )
dest_path.open("w" ).write("\n".join(__lowerCAmelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
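# A self-contained check of the minify logic above, run against a throwaway
# temporary directory; the paths and file contents here are illustrative.
import tempfile
from pathlib import Path

_src = Path(tempfile.mkdtemp())
_dst = Path(tempfile.mkdtemp()) / 'mini'
(_src / 'train.source').write_text('\n'.join(f'line {i}' for i in range(100)))

_dst.mkdir(exist_ok=True)
for _path in _src.iterdir():
    _lines = [x.rstrip() for x in _path.open().readlines()][:5]  # keep the first n lines
    (_dst / _path.name).open('w').write('\n'.join(_lines))

print((_dst / 'train.source').read_text().splitlines())  # only 5 lines survive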
| 231 | 0 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = """encodec"""
def __init__( self : List[Any] , lowercase_ : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , lowercase_ : int=24_000 , lowercase_ : str=1 , lowercase_ : str=False , lowercase_ : Tuple=None , lowercase_ : List[str]=None , lowercase_ : Optional[int]=128 , lowercase_ : Any=32 , lowercase_ : Dict=1 , lowercase_ : Tuple=[8, 5, 4, 2] , lowercase_ : List[str]="weight_norm" , lowercase_ : Optional[int]=7 , lowercase_ : Optional[Any]=7 , lowercase_ : Optional[Any]=3 , lowercase_ : Any=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Union[str, Any]="reflect" , lowercase_ : Optional[Any]=2 , lowercase_ : Any=2 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Optional[int]=1_024 , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=True , **lowercase_ : str , ) -> Optional[Any]:
UpperCAmelCase : Any = target_bandwidths
UpperCAmelCase : str = sampling_rate
UpperCAmelCase : List[str] = audio_channels
UpperCAmelCase : List[str] = normalize
UpperCAmelCase : Union[str, Any] = chunk_length_s
UpperCAmelCase : List[Any] = overlap
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[str] = num_filters
UpperCAmelCase : Tuple = num_residual_layers
UpperCAmelCase : List[Any] = upsampling_ratios
UpperCAmelCase : Union[str, Any] = norm_type
UpperCAmelCase : int = kernel_size
UpperCAmelCase : Union[str, Any] = last_kernel_size
UpperCAmelCase : Dict = residual_kernel_size
UpperCAmelCase : Union[str, Any] = dilation_growth_rate
UpperCAmelCase : Tuple = use_causal_conv
UpperCAmelCase : List[str] = pad_mode
UpperCAmelCase : List[str] = compress
UpperCAmelCase : Dict = num_lstm_layers
UpperCAmelCase : Dict = trim_right_ratio
UpperCAmelCase : List[str] = codebook_size
UpperCAmelCase : str = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase : Dict = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : Union[str, Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCAmelCase_ ( self : Any ) -> int:
return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
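# Worked example of the derived properties above, using the 24 kHz defaults from
# __init__; the numbers follow directly from the formulas in the three properties.
import math
import numpy as np

_sampling_rate = 24_000
_upsampling_ratios = [8, 5, 4, 2]
_target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

_hop_length = int(np.prod(_upsampling_ratios))         # 320 samples per frame
_frame_rate = math.ceil(_sampling_rate / _hop_length)  # 75 frames per second
_num_quantizers = int(1_000 * _target_bandwidths[-1] // (_frame_rate * 10))  # 32
print(_hop_length, _frame_rate, _num_quantizers)       # 320 75 32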
| 280 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : str ) -> Tuple:
UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
UpperCAmelCase : Dict = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
UpperCAmelCase : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase : Dict = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
print(f"""Found {torch.cuda.device_count()} devices.""" )
UpperCAmelCase : int = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(f"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self : str ) -> List[Any]:
UpperCAmelCase : Optional[int] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
UpperCAmelCase : Dict = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ = Accelerator()
lowercase__ = (accelerator.state.process_index + 2, 10)
lowercase__ = torch.randint(0, 10, shape).to(accelerator.device)
lowercase__ = ""
lowercase__ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowercase__ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowercase__ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
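# A single-process sketch of the padding behavior the checks above assert:
# pad_across_processes zero-pads each rank's tensor along dim 0 up to the largest
# size, at the end by default or at the front with pad_first=True. The helper
# below is an illustrative stand-in, not the accelerate implementation.
import torch

def _pad_to(tensor, target_len, pad_first=False):
    pad = torch.zeros(target_len - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

_t = torch.randint(1, 10, (3, 4))
_p = _pad_to(_t, 5)
assert torch.equal(_p[:3], _t) and torch.all(_p[3:] == 0)
_pf = _pad_to(_t, 5, pad_first=True)
assert torch.equal(_pf[2:], _t) and torch.all(_pf[:2] == 0)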
| 280 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ):
A = AutoTokenizer.from_pretrained(snake_case__ )
A = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
A = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
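# A pure-python sketch of the "pad to longest in batch" collation used above,
# without the tokenizer; the token ids and pad id below are illustrative.
def _collate_longest(batch, pad_id=0):
    max_len = max(len(ids) for ids in batch)
    return [ids + [pad_id] * (max_len - len(ids)) for ids in batch]

print(_collate_longest([[5, 6], [7, 8, 9, 10], [11]]))
# -> [[5, 6, 0, 0], [7, 8, 9, 10], [11, 0, 0, 0]]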
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ):
# Initialize accelerator
A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A = config['lr']
A = int(config['num_epochs'] )
A = int(config['seed'] )
A = int(config['batch_size'] )
A = args.model_name_or_path
set_seed(snake_case__ )
A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
A = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A = 1
A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
A = 0
# We also need to keep track of the starting epoch so files are named properly
A = 0
# Now we train the model
A = evaluate.load('glue' , 'mrpc' )
A = 0
A = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
A = model(**snake_case__ )
A = outputs.loss
A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**snake_case__ )
A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case__ )
A = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
A = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
def _snake_case ( ):
A = argparse.ArgumentParser(description='Simple example of a training script with a performance lower-bound check.' )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , )
A = parser.parse_args()
A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
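# A minimal sketch of the duplicate-trimming done in the eval loop above: in a
# distributed run the gathered tensors can repeat samples in the final batch, so
# on the last step only the tail needed to reach len(dataset) is kept. The batch
# contents below are illustrative.
def _trim_gathered(batches, dataset_len):
    samples_seen, preds = 0, []
    for step, batch in enumerate(batches):
        if step == len(batches) - 1:
            batch = batch[: dataset_len - samples_seen]  # drop duplicated tail samples
        samples_seen += len(batch)
        preds.extend(batch)
    return preds

# a 10-sample dataset gathered as three world-size-4 batches (last one padded):
print(_trim_gathered([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 8, 9]], 10))  # [0, 1, ..., 9]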
if __name__ == "__main__":
main()
| 74 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ ( A_ ):
lowercase__ = '''megatron-bert'''
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=29_056 , snake_case_ : int=1_024 , snake_case_ : Optional[int]=24 , snake_case_ : str=16 , snake_case_ : str=4_096 , snake_case_ : Tuple="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : Optional[int]=0.1 , snake_case_ : List[str]=512 , snake_case_ : Optional[int]=2 , snake_case_ : Dict=0.02 , snake_case_ : Optional[Any]=1e-12 , snake_case_ : Optional[Any]=0 , snake_case_ : int="absolute" , snake_case_ : List[str]=True , **snake_case_ : Tuple , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , **snake_case_ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
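# A generic, runnable sketch of the config-subclass pattern above. ToyConfig is an
# illustrative stand-in, not the transformers PretrainedConfig: defaults live in
# __init__ and every argument is stored as an attribute so the config can
# round-trip through JSON serialization.
import json

class ToyConfig:
    model_type = 'toy-bert'

    def __init__(self, vocab_size=29_056, hidden_size=1_024, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        for key, value in kwargs.items():
            setattr(self, key, value)

    def to_json_string(self):
        return json.dumps(self.__dict__, indent=2)

print(ToyConfig(num_hidden_layers=24).to_json_string())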
| 247 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a , cache_dir=a)
SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(a , os.listdir(a)[0] , 'snapshots'))]
SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(a , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(a) == num_samples
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a , safety_checker=a , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0) , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , use_memory_efficient_attention=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
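# A pure-numpy stand-in for the input sharding used throughout the tests above:
# inputs are split along the leading batch axis into one slice per device, while
# params are replicated to every device. The shapes here are illustrative.
import numpy as np

def _shard(x, n_devices):
    # (batch, ...) -> (n_devices, batch // n_devices, ...)
    return x.reshape(n_devices, x.shape[0] // n_devices, *x.shape[1:])

_prompt_ids = np.ones((8, 77), dtype=np.int32)  # 8 prompts, 77 tokens each
print(_shard(_prompt_ids, n_devices=4).shape)   # (4, 2, 77)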
| 327 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a , cache_dir=a)
SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(a , os.listdir(a)[0] , 'snapshots'))]
SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(a , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(a) == num_samples
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a , safety_checker=a , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0) , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , use_memory_efficient_attention=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
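# A sketch of the per-device PRNG handling above: one root key is split into one
# key per device so each shard samples different noise. Requires jax; the device
# count is whatever the host provides.
import jax

_rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
print(_rngs.shape)  # (device_count, 2): one independent key per device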
| 327 | 1 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Dict = ["image_processor", "tokenizer"]
lowerCAmelCase : Dict = "OwlViTImageProcessor"
lowerCAmelCase : Optional[int] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : int ,_snake_case : Dict=None ,_snake_case : str=None ,**_snake_case : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : int = kwargs.pop('''feature_extractor''' )
lowercase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
def __call__( self : Tuple ,_snake_case : int=None ,_snake_case : Optional[int]=None ,_snake_case : Optional[Any]=None ,_snake_case : Any="max_length" ,_snake_case : Optional[Any]="np" ,**_snake_case : List[Any] ) -> Any:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(_snake_case ,_snake_case ) or (isinstance(_snake_case ,_snake_case ) and not isinstance(text[0] ,_snake_case )):
lowercase__ : List[Any] = [self.tokenizer(_snake_case ,padding=_snake_case ,return_tensors=_snake_case ,**_snake_case )]
elif isinstance(_snake_case ,_snake_case ) and isinstance(text[0] ,_snake_case ):
lowercase__ : int = []
# Maximum number of queries across batch
lowercase__ : int = max([len(_snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_snake_case ) != max_num_queries:
lowercase__ : Dict = t + [''' '''] * (max_num_queries - len(_snake_case ))
lowercase__ : Tuple = self.tokenizer(_snake_case ,padding=_snake_case ,return_tensors=_snake_case ,**_snake_case )
encodings.append(_snake_case )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowercase__ : Tuple = np.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Tuple = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase__ : Union[str, Any] = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Optional[Any] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase__ : Dict = torch.cat([encoding['''input_ids'''] for encoding in encodings] ,dim=0 )
lowercase__ : Optional[Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase__ : Dict = tf.stack([encoding['''input_ids'''] for encoding in encodings] ,axis=0 )
lowercase__ : Optional[Any] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] ,axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowercase__ : Dict = BatchEncoding()
lowercase__ : Tuple = input_ids
lowercase__ : Any = attention_mask
if query_images is not None:
lowercase__ : Tuple = BatchEncoding()
lowercase__ : Dict = self.image_processor(
_snake_case ,return_tensors=_snake_case ,**_snake_case ).pixel_values
lowercase__ : Dict = query_pixel_values
if images is not None:
lowercase__ : Union[str, Any] = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Tuple ,*_snake_case : Union[str, Any] ,**_snake_case : Tuple ) -> Tuple:
"""simple docstring"""
return self.image_processor.post_process(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,*_snake_case : Optional[Any] ,**_snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Dict ,*_snake_case : Optional[int] ,**_snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : List[Any] ,*_snake_case : Dict ,**_snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Optional[int] ,*_snake_case : Dict ,**_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
@property
def UpperCAmelCase ( self : Any ) -> str:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' ,_snake_case ,)
return self.image_processor
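# A minimal sketch of the query-padding step in __call__ above: each sample's
# list of text queries is padded with " " entries up to the batch-wide maximum so
# the tokenized batch stays rectangular. The example queries are illustrative.
_text = [['a cat'], ['a dog', 'a remote', 'a couch']]
_max_num_queries = max(len(t) for t in _text)
_padded = [t + [' '] * (_max_num_queries - len(t)) for t in _text]
print(_padded)  # [['a cat', ' ', ' '], ['a dog', 'a remote', 'a couch']]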
| 16 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
print('Loading config file...' )
def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any="" , __SCREAMING_SNAKE_CASE : List[Any]="." ):
lowercase_ : List[str] = []
for k, v in d.items():
lowercase_ : Dict = parent_key + sep + k if parent_key else k
if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() )
else:
items.append((new_key, v) )
return dict(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = argparse.Namespace()
with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file:
try:
lowercase_ : str = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader )
lowercase_ : List[Any] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE )
for k, v in flat_cfg.items():
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) )
return config
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : int = MobileViTVaConfig()
lowercase_ : List[str] = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowercase_ : List[Any] = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : str = 3_84
else:
lowercase_ : Dict = 2_56
lowercase_ : int = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowercase_ : int = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowercase_ : Optional[Any] = 3_84
else:
lowercase_ : Tuple = 2_56
lowercase_ : List[str] = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowercase_ : int = 1_51
lowercase_ : Optional[Any] = 5_12
lowercase_ : str = 'ade20k-id2label.json'
lowercase_ : List[Any] = True
elif task_name.startswith('voc_' ):
lowercase_ : Union[str, Any] = 21
lowercase_ : Tuple = 5_12
lowercase_ : List[str] = 'pascal-voc-id2label.json'
lowercase_ : str = True
# orig_config
lowercase_ : Optional[int] = load_orig_config_file(__SCREAMING_SNAKE_CASE )
assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 )
assert (
getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 )
if "_deeplabv3" in task_name:
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
lowercase_ : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 )
lowercase_ : Any = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
# id2label
lowercase_ : Optional[Any] = 'huggingface/label-files'
lowercase_ : List[Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : int = idalabel
lowercase_ : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = val
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ):
if base_model:
lowercase_ : int = ''
else:
lowercase_ : str = 'mobilevitv2.'
lowercase_ : Dict = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowercase_ : Dict = k[8:]
else:
lowercase_ : Union[str, Any] = k
if ".block." in k:
lowercase_ : List[str] = k_new.replace('.block.' , '.' )
if ".conv." in k:
lowercase_ : List[Any] = k_new.replace('.conv.' , '.convolution.' )
if ".norm." in k:
lowercase_ : str = k_new.replace('.norm.' , '.normalization.' )
if "conv_1." in k:
lowercase_ : Dict = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if F'''layer_{i}.''' in k:
lowercase_ : Tuple = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowercase_ : Any = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
if ".red_1x1." in k:
lowercase_ : str = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
for i in [3, 4, 5]:
if F'''layer_{i}.0.''' in k:
lowercase_ : Tuple = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if F'''layer_{i}.1.local_rep.0.''' in k:
lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if F'''layer_{i}.1.local_rep.1.''' in k:
lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowercase_ : Dict = [0, 1]
elif i == 4:
lowercase_ : int = [0, 1, 2, 3]
elif i == 5:
lowercase_ : List[str] = [0, 1, 2]
for j in j_in:
if F'''layer_{i}.1.global_rep.{j}.''' in k:
lowercase_ : List[str] = k_new.replace(
F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowercase_ : int = k_new.replace(
F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if F'''layer_{i}.1.conv_proj.''' in k:
lowercase_ : str = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowercase_ : Any = k_new.replace('pre_norm_attn.1.' , 'attention.' )
if "pre_norm_ffn.0." in k:
lowercase_ : List[str] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowercase_ : int = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowercase_ : str = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
if "classifier.1." in k:
lowercase_ : Union[str, Any] = k_new.replace('classifier.1.' , 'classifier.' )
if "seg_head." in k:
lowercase_ : Optional[int] = k_new.replace('seg_head.' , 'segmentation_head.' )
if ".aspp_layer." in k:
lowercase_ : Dict = k_new.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in k:
lowercase_ : Dict = k_new.replace('.aspp_pool.' , '.' )
rename_keys.append((k, k_new) )
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : str = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__SCREAMING_SNAKE_CASE )
for k in keys_to_ignore:
state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase__( ):
lowercase_ : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Tuple = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load original state_dict
lowercase_ : Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowercase_ : Tuple = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : Optional[int] = False
else:
lowercase_ : Any = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval()
lowercase_ : int = False
# remove and rename some keys of the loaded original model
lowercase_ : Any = checkpoint
remove_unused_keys(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# load modified state_dict
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase_ : Union[str, Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase_ : Any = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : Optional[int] = model(**__SCREAMING_SNAKE_CASE )
# verify classification model
if task_name.startswith('imagenet' ):
lowercase_ : List[str] = outputs.logits
lowercase_ : int = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] )
assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
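# A minimal, self-contained sketch of the rename pattern the script above
# relies on: pop the tensor out of the state dict under its old key and
# re-insert it under the new one. `toy_state` and `mapping` are illustrative
# names, not values from a real checkpoint.
import torch

toy_state = {"layer_1.1.conv_proj.weight": torch.zeros(3)}
mapping = [("layer_1.1.conv_proj.weight", "encoder.layer.0.conv_projection.weight")]
for src_key, dest_key in mapping:
    toy_state[dest_key] = toy_state.pop(src_key)
assert "encoder.layer.0.conv_projection.weight" in toy_state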
| 213 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCamelCase_ : Optional[int] = logging.getLogger(__name__)
def __a ( _UpperCamelCase: List[Any]=2 , _UpperCamelCase: Any=3 , _UpperCamelCase: str=16 , _UpperCamelCase: int = 10 , _UpperCamelCase: int = 2 ) -> Tuple:
"""simple docstring"""
def get_dataset(_UpperCamelCase: int ):
_snake_case = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_UpperCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_snake_case = get_dataset(_UpperCamelCase )
_snake_case = get_dataset(_UpperCamelCase )
_snake_case = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
_snake_case = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __a ( _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Optional[Any] , _UpperCamelCase: Dict , _UpperCamelCase: Tuple , _UpperCamelCase: str , _UpperCamelCase: List[str]=None ) -> Tuple:
"""simple docstring"""
_snake_case = []
for epoch in range(_UpperCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
_snake_case , _snake_case = batch
_snake_case = model(_UpperCamelCase )
_snake_case = torch.nn.functional.mse_loss(_UpperCamelCase , _UpperCamelCase )
accelerator.backward(_UpperCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a ( nn.Module ):
def __init__( self ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Parameter(torch.randn(1 ) )
_snake_case = nn.Parameter(torch.randn(1 ) )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
return x * self.a + self.b
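# The synthetic task every test below fits is noisy y = a * x + b (the dummy
# dataloaders above use a=2, b=3 by default), so DummyModel's two parameters
# should drift toward those values during training. A least-squares sketch of
# the same recovery; the sample size and tolerance are illustrative:
import torch

_a_true, _b_true = 2.0, 3.0
_x = torch.randn(64, 1)
_y = _a_true * _x + _b_true + 0.1 * torch.randn(64, 1)
_design = torch.cat([_x, torch.ones_like(_x)], dim=1)
_coef = torch.linalg.lstsq(_design, _y).solution  # approximately [[a], [b]]
assert torch.allclose(_coef, torch.tensor([[_a_true], [_b_true]]), atol=0.2)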
class _a ( unittest.TestCase ):
def _lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(total_limit=1 ,project_dir=_SCREAMING_SNAKE_CASE ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
_snake_case = Accelerator(project_config=_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def _lowercase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case , _snake_case = dummy_dataloaders()
# Train baseline
_snake_case = Accelerator()
_snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
_snake_case = os.path.join(_SCREAMING_SNAKE_CASE ,"initial" )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
_snake_case = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
# Train partially
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = Accelerator()
_snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
accelerator.load_state(_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save everything
_snake_case = os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoint" )
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(_SCREAMING_SNAKE_CASE )
test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
_snake_case = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
_snake_case = train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
# Train partially
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
_snake_case = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = train(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_1" ) )
test_rands += train(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
((_snake_case) , (_snake_case)) = model.a.item(), model.b.item()
_snake_case = optimizer.state_dict()
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[Any]:
_snake_case = torch.tensor([1, 2, 3] )
_snake_case = torch.tensor([2, 3, 4] )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(net.parameters() )
_snake_case = Accelerator()
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def _lowercase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_snake_case = DummyModel()
_snake_case = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
_snake_case = torch.optim.lr_scheduler.StepLR(_SCREAMING_SNAKE_CASE ,step_size=1 ,gamma=0.9_9 )
_snake_case , _snake_case = dummy_dataloaders()
_snake_case = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE )
# Train baseline
_snake_case = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
_snake_case = scheduler.state_dict()
train(3 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertNotEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) )
self.assertEqual(_SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
def _lowercase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_snake_case = DummyModel()
_snake_case = ProjectConfiguration(automatic_checkpoint_naming=_SCREAMING_SNAKE_CASE ,total_limit=2 )
# Train baseline
_snake_case = Accelerator(project_dir=_SCREAMING_SNAKE_CASE ,project_config=_SCREAMING_SNAKE_CASE )
_snake_case = accelerator.prepare(_SCREAMING_SNAKE_CASE )
            # Save 11 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_10" ) ) )
@require_cuda
def _lowercase ( self ) -> Optional[int]:
_snake_case = ["torchrun", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_SCREAMING_SNAKE_CASE ,env=os.environ.copy() )
if __name__ == "__main__":
UpperCamelCase_ : Dict = '''/tmp/accelerate/state_checkpointing'''
UpperCamelCase_ : Dict = DummyModel()
UpperCamelCase_ : Any = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCamelCase_ : Union[str, Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] = dummy_dataloaders()
UpperCamelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCamelCase_ : Optional[Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Optional[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCamelCase_ , UpperCamelCase_ : int = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCamelCase_ : int = group['''params'''][0].device
break
assert param_device.type == accelerator.device.type
UpperCamelCase_ : int = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
UpperCamelCase_ : Union[str, Any] = group['''params'''][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
UpperCamelCase_ : List[Any] = group['''params'''][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
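    # A minimal restatement of the save/load round trip with an explicit
    # directory; `sketch_dir` is an illustrative temp path, and the calls are
    # the same public Accelerator API exercised above.
    sketch_dir = tempfile.mkdtemp()
    accelerator.save_state(sketch_dir)  # writes model, optimizer and RNG states
    accelerator.load_state(sketch_dir)  # restores them so a run can resume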
| 142 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCamelCase_ : Optional[Any] = 250004
UpperCamelCase_ : Union[str, Any] = 250020
@require_sentencepiece
@require_tokenizers
class _a ( __lowerCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = MBartTokenizer
SCREAMING_SNAKE_CASE_ : List[str] = MBartTokenizerFast
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = True
def _lowercase ( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_snake_case = MBartTokenizer(_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ) -> Dict:
_snake_case = MBartTokenizer(_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
_snake_case = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_snake_case = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
def _lowercase ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_snake_case = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
_snake_case = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_snake_case = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE ,legacy_format=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
                # Checks it saves with the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE ,legacy_format=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = """facebook/mbart-large-en-ro"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
SCREAMING_SNAKE_CASE_ : Dict = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
SCREAMING_SNAKE_CASE_ : Optional[int] = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
@classmethod
def _lowercase ( cls ) -> List[str]:
_snake_case = MBartTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="en_XX" ,tgt_lang="ro_RO" )
_snake_case = 1
return cls
def _lowercase ( self ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] ,250_020 )
def _lowercase ( self ) -> Tuple:
_snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
self.assertIn(_SCREAMING_SNAKE_CASE ,self.tokenizer.all_special_ids )
_snake_case = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
_snake_case = self.tokenizer.decode(_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
_snake_case = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> List[Any]:
_snake_case = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] ,_SCREAMING_SNAKE_CASE )
_snake_case = 10
_snake_case = self.tokenizer(_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,_SCREAMING_SNAKE_CASE )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) ,[250_026, 250_001] )
def _lowercase ( self ) -> str:
_snake_case = tempfile.mkdtemp()
_snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = MBartTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_SCREAMING_SNAKE_CASE )
@require_torch
def _lowercase ( self ) -> Dict:
_snake_case = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,return_tensors="pt" )
_snake_case = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase ( self ) -> Optional[int]:
_snake_case = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
_snake_case = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,_SCREAMING_SNAKE_CASE )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase ( self ) -> str:
_snake_case = self.tokenizer(self.src_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=3 ,return_tensors="pt" )
_snake_case = self.tokenizer(
text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=10 ,return_tensors="pt" )
_snake_case = targets["input_ids"]
_snake_case = shift_tokens_right(_SCREAMING_SNAKE_CASE ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _lowercase ( self ) -> Any:
_snake_case = self.tokenizer._build_translation_inputs(
"A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,{
# A, test, EOS, en_XX
"input_ids": [[62, 3_034, 2, 250_004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} ,)
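# What MBart's shift_tokens_right does to each row, restated on plain lists
# for illustration (this is not the library implementation): the language code
# sitting after </s> at the end of the labels is wrapped around to position 0
# of decoder_input_ids.
def toy_shift_tokens_right(ids, pad_id):
    last_non_pad = max(i for i, tok in enumerate(ids) if tok != pad_id)
    return [ids[last_non_pad]] + ids[:-1]

assert toy_shift_tokens_right([62, 3_034, 2, 250_001], pad_id=1) == [250_001, 62, 3_034, 2]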
| 142 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case : int = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_snake_case : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
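# The deferred-import idea _LazyModule implements, in miniature: attribute
# access triggers the real import via PEP 562's module-level __getattr__.
# `_sketch_structure` is an illustrative stand-in for _import_structure above.
import importlib

_sketch_structure = {"math": ["sqrt"]}

def __getattr__(name):
    for _module_name, _names in _sketch_structure.items():
        if name in _names:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(name)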
| 284 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = """beit"""
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple=8_1_9_2 , lowerCAmelCase_ : Optional[int]=7_6_8 , lowerCAmelCase_ : int=1_2 , lowerCAmelCase_ : Optional[int]=1_2 , lowerCAmelCase_ : Any=3_0_7_2 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Any=0.02 , lowerCAmelCase_ : int=1e-12 , lowerCAmelCase_ : int=2_2_4 , lowerCAmelCase_ : str=1_6 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : int=False , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[Any]=[3, 5, 7, 1_1] , lowerCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=0.4 , lowerCAmelCase_ : Tuple=2_5_6 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[int]=2_5_5 , **lowerCAmelCase_ : Any , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_mask_token
__lowerCAmelCase = use_absolute_position_embeddings
__lowerCAmelCase = use_relative_position_bias
__lowerCAmelCase = use_shared_relative_position_bias
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase = out_indices
__lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase = use_auxiliary_head
__lowerCAmelCase = auxiliary_loss_weight
__lowerCAmelCase = auxiliary_channels
__lowerCAmelCase = auxiliary_num_convs
__lowerCAmelCase = auxiliary_concat_input
__lowerCAmelCase = semantic_loss_ignore_index
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = version.parse("""1.11""" )
@property
def lowercase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase ( self : Optional[Any] ) -> float:
return 1e-4
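# The defaults above fix the sequence length BEiT operates on: a 224x224 image
# cut into 16x16 patches gives (224 // 16) ** 2 = 196 tokens, plus one [CLS].
_sketch_num_patches = (224 // 16) ** 2
assert _sketch_num_patches + 1 == 197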
| 284 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : List[str] = logging.get_logger(__name__)
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : Tuple=False ) -> List[str]:
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _UpperCAmelCase ( _UpperCamelCase : Any, _UpperCamelCase : Any, _UpperCamelCase : Tuple=False ) -> List[str]:
for i in range(config.num_hidden_layers ):
if base_model:
A_ = """"""
else:
A_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
A_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(A__, A__ )
def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : Optional[Any], _UpperCamelCase : Dict ) -> str:
A_ = dct.pop(A__ )
A_ = val
def _UpperCAmelCase ( ) -> Tuple:
A_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ = Image.open(requests.get(A__, stream=A__ ).raw )
return im
@torch.no_grad()
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : List[Any] ) -> List[str]:
A_ = ViTConfig()
A_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A_ = True
A_ = int(vit_name[-12:-10] )
A_ = int(vit_name[-9:-6] )
else:
A_ = 10_00
A_ = """huggingface/label-files"""
A_ = """imagenet-1k-id2label.json"""
A_ = json.load(open(hf_hub_download(A__, A__, repo_type='''dataset''' ), '''r''' ) )
A_ = {int(A__ ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = int(vit_name[-6:-4] )
A_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
A_ = 1_92
A_ = 7_68
A_ = 12
A_ = 3
elif vit_name[9:].startswith('''small''' ):
A_ = 3_84
A_ = 15_36
A_ = 12
A_ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
A_ = 7_68
A_ = 23_04
A_ = 8
A_ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
A_ = 10_24
A_ = 40_96
A_ = 24
A_ = 16
elif vit_name[4:].startswith('''huge''' ):
A_ = 12_80
A_ = 51_20
A_ = 32
A_ = 16
# load original model from timm
A_ = timm.create_model(A__, pretrained=A__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ = timm_model.state_dict()
if base_model:
remove_classification_head_(A__ )
A_ = create_rename_keys(A__, A__ )
for src, dest in rename_keys:
rename_key(A__, A__, A__ )
read_in_q_k_v(A__, A__, A__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ = ViTModel(A__ ).eval()
else:
A_ = ViTForImageClassification(A__ ).eval()
model.load_state_dict(A__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A_ = DeiTImageProcessor(size=config.image_size )
else:
A_ = ViTImageProcessor(size=config.image_size )
A_ = image_processor(images=prepare_img(), return_tensors='''pt''' )
A_ = encoding["""pixel_values"""]
A_ = model(A__ )
if base_model:
A_ = timm_model.forward_features(A__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(A__, outputs.pooler_output, atol=1E-3 )
else:
A_ = timm_model(A__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(A__, outputs.logits, atol=1E-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
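    # The read_in_q_k_v step above slices timm's fused qkv projection into the
    # separate query/key/value matrices. The same slicing on a toy tensor
    # (the hidden size here is illustrative):
    import torch

    _hidden = 8
    _qkv_weight = torch.randn(3 * _hidden, _hidden)  # timm stacks q, k, v row-wise
    _q = _qkv_weight[:_hidden, :]
    _k = _qkv_weight[_hidden : 2 * _hidden, :]
    _v = _qkv_weight[-_hidden:, :]
    assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)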
| 350 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__snake_case : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__snake_case : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__snake_case : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
with open(_UpperCamelCase, '''rb''' ) as f:
A_ = Image.open(_UpperCamelCase )
return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__lowercase : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__lowercase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowercase : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self ) -> int:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
__lowercase : str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowercase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowercase : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowercase : bool = field(
default=_UpperCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def _UpperCAmelCase ( _UpperCamelCase : str ) -> Dict:
A_ = torch.stack([example['''pixel_values'''] for example in examples] )
A_ = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def _UpperCAmelCase ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ ,A_ ,A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A_ ,A_ ,A_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', _UpperCamelCase, _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
A_ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
else:
A_ = {}
if data_args.train_dir is not None:
A_ = os.path.join(data_args.train_dir, '''**''' )
if data_args.validation_dir is not None:
A_ = os.path.join(data_args.validation_dir, '''**''' )
A_ = load_dataset(
'''imagefolder''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir, task='''image-classification''', )
# If we don't have a validation split, split off a percentage of train as validation.
A_ = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, _UpperCamelCase ) and data_args.train_val_split > 0.0:
A_ = dataset['''train'''].train_test_split(data_args.train_val_split )
A_ = split['''train''']
A_ = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A_ = dataset['''train'''].features['''labels'''].names
A_ ,A_ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
A_ = str(_UpperCamelCase )
A_ = label
# Load the accuracy metric from the datasets package
A_ = evaluate.load('''accuracy''' )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping string to float.
def compute_metrics(_UpperCamelCase : Optional[Any] ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
A_ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(_UpperCamelCase ), labelaid=_UpperCamelCase, idalabel=_UpperCamelCase, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
A_ = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
A_ = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(_UpperCamelCase : Dict ):
A_ = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(_UpperCamelCase : Any ):
A_ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
A_ = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
A_ = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
    # Initialize our trainer
A_ = Trainer(
model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, )
# Training
if training_args.do_train:
A_ = None
if training_args.resume_from_checkpoint is not None:
A_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ = last_checkpoint
A_ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ = trainer.evaluate()
trainer.log_metrics('''eval''', _UpperCamelCase )
trainer.save_metrics('''eval''', _UpperCamelCase )
# Write model card and (optionally) push to hub
A_ = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 18 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Dict = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCamelCase ( _a ):
'''simple docstring'''
__snake_case = """fnet"""
def __init__( self : Tuple , lowerCAmelCase_ : Tuple=3_20_00 , lowerCAmelCase_ : List[str]=7_68 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : Union[str, Any]=30_72 , lowerCAmelCase_ : Dict="gelu_new" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Any=5_12 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : str=1e-12 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : List[Any]=5_12 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Optional[Any]=2 , **lowerCAmelCase_ : List[str] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
A__ : str =vocab_size
A__ : List[str] =max_position_embeddings
A__ : Union[str, Any] =hidden_size
A__ : Any =num_hidden_layers
A__ : List[str] =intermediate_size
A__ : Tuple =hidden_act
A__ : Optional[int] =hidden_dropout_prob
A__ : int =initializer_range
A__ : List[Any] =type_vocab_size
A__ : str =layer_norm_eps
A__ : Any =use_tpu_fourier_optimizations
A__ : Optional[int] =tpu_short_seq_length
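# The mixing mechanism this config parameterises, sketched per the FNet paper
# (an illustration of the idea, not code from this file): self-attention is
# replaced by a parameter-free 2D Fourier transform over the hidden and
# sequence dimensions, keeping only the real part.
import torch

_x = torch.randn(2, 16, 32)  # (batch, seq_len, hidden_size)
_mixed = torch.fft.fft(torch.fft.fft(_x, dim=-1), dim=-2).real
assert _mixed.shape == _x.shape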
| 134 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52 |
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the squares of the first n natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'{solution() = }')
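Both quantities have closed forms, sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6, so the same answer is available without loops. A quick sketch of an equivalent O(1) version:
def solution_closed_form(n: int = 100) -> int:
    # Same difference, via the standard closed-form identities.
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2640  # the known Project Euler #6 check value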
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Input exhausted before the tree was complete")
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
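Since `build_tree` is interactive, a hand-built tree is convenient for quick, non-interactive checks of the traversals (values below are illustrative):
# Non-interactive sanity check:
demo_root = TreeNode(1)
demo_root.left, demo_root.right = TreeNode(2), TreeNode(3)
pre_order(demo_root)   # prints 1,2,3,
print()
in_order(demo_root)    # prints 2,1,3,
print()
post_order(demo_root)  # prints 2,3,1,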
| 109 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
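A sketch of how a library function might call this helper to retire a keyword argument. The caller below is hypothetical, and because `deprecate` does a relative import of `__version__`, it only runs as part of the package:
def resize(image, size=256, **kwargs):
    # Hypothetical caller: `old_size` was renamed to `size`; keep accepting it until 1.0.0.
    old_size = deprecate(("old_size", "1.0.0", "Use `size` instead."), take_from=kwargs)
    if old_size is not None:
        size = old_size
    return image, size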
| 109 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5_122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 147 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
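A sketch of a concrete subclass, showing how `register_subcommand` typically attaches the command to an argparse sub-parser. The `env` command name and its factory wiring here are illustrative, and `parser` is assumed to be the sub-parsers action of the root CLI parser:
class EnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Attach a sub-parser and point it at this command's factory.
        sub = parser.add_parser("env", help="Print basic environment information.")
        sub.set_defaults(func=lambda args: EnvCommand())

    def run(self):
        import platform

        print(f"Python version: {platform.python_version()}")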
| 147 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
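A quick usage sketch of the finished pipeline; the model downloads on first use and the printed score is illustrative:
from transformers import pipeline

classifier = pipeline("text-classification")
print(classifier("This movie was great!"))
# e.g. [{'label': 'POSITIVE', 'score': 0.9998}] -- the exact score depends on the default model
print(classifier("This movie was great!", top_k=None))  # scores for every label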
| 46 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Wraps the CLIP processor so that image preprocessing stays differentiable."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate the VQGAN and CLIP models plus the differentiable preprocessor."""
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        """Add a vector to the latent space of the model and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
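A hypothetical driver for the class above, assuming local VQGAN weights and the helper modules (`loaders`, `img_processing`, `utils`) are importable; the prompts, weights and file paths are illustrative:
if __name__ == "__main__":
    editor = VQGAN_CLIP(iterations=30)
    editor.generate(
        pos_prompts="a smiling face:1.0",
        neg_prompts="blurry:0.5",
        image_path="face.png",
        show_intermediate=False,
        save_intermediate=True,
    )
    editor.make_animation(output_path="./edit.gif")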
| 201 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
A_ : str ="""src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
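Typical invocations, run from the repository root (the token and sha values are placeholders):
# python utils/update_metadata.py --check-only
# python utils/update_metadata.py --token <HF_TOKEN> --commit_sha <COMMIT_SHA>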
| 80 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
BarthezTokenizer = None
logger = logging.get_logger(__name__)
A_ : List[str] ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
A_ : Union[str, Any] ="""▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
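A usage sketch; the checkpoint is fetched from the Hub on first call and the decoded output is illustrative:
tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
encoded = tokenizer("Les transformers, c'est génial !")
print(encoded["input_ids"])                    # ids wrapped in <s> ... </s>
print(tokenizer.decode(encoded["input_ids"]))  # round-trips back to the sentence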
| 80 | 1 |
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 103 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
env_name = "hopper-medium-v2"
env = gym.make(env_name)
pipeline = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 1000
rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
next_observation, reward, terminal, _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 103 | 1 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that the `by_feature` example scripts stay in sync with the complete examples.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 253 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = []
__magic_name__ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Tuple="<::::>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> None:
UpperCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : List[str] = vocab_file
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePiece objects are not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
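# A minimal usage sketch (hedged: fetches the pretrained sentencepiece model
# from the Hub, so it needs network access):
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder")
#   ids = tok("Hello world")["input_ids"]
#   tok.convert_ids_to_tokens(ids)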
| 253 | 1 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation name to its torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 280 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
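# Note: with the _LazyModule indirection above, `from <pkg>.models.mvp import
# MvpModel` only imports modeling_mvp (and hence torch) on first attribute
# access; the config and tokenizer symbols stay cheap to import.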
| 280 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None) -> None:
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it."""
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
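# Example invocation (hypothetical local paths):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-converted \
#       --finetuning_task sts-b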
| 354 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve for the one quantity given as 0 in an RX circuit, where Z**2 = R**2 + X**2."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
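    # A quick sanity check beyond the doctests: a 3-4-5 right triangle of
    # resistance and reactance gives an impedance of 5.0.
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}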
| 174 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 40_96,
"""allenai/longformer-large-4096""": 40_96,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 40_96,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 40_96,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character, avoiding whitespace/control bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
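# Illustrative checks of the two helpers above:
#   >>> len(bytes_to_unicode())
#   256
#   >>> get_pairs(("l", "o", "w"))    # the pairs BPE considers for merging
#   {('l', 'o'), ('o', 'w')}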
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer (GPT-2/RoBERTa style) used by Longformer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="replace" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=False , **__lowerCAmelCase , ) -> Optional[int]:
lowercase__ : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
lowercase__ : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
lowercase__ : Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
lowercase__ : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
lowercase__ : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
lowercase__ : int = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase__ : Optional[int] = json.load(__UpperCAmelCase )
lowercase__ : List[str] = {v: k for k, v in self.encoder.items()}
lowercase__ : int = errors # how to handle errors in decoding
lowercase__ : Dict = bytes_to_unicode()
lowercase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase__ : List[str] = merges_handle.read().split('''\n''' )[1:-1]
lowercase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : Union[str, Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowercase__ : Optional[Any] = {}
lowercase__ : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : str = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
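# A minimal usage sketch (hedged: pulls vocab/merges from the Hub):
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok.tokenize("Hello world")   # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']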
| 198 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by acquire(); releases the lock on exit."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() for non-blocking exclusive locks on Windows."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses fcntl.flock() for non-blocking exclusive locks on Unix."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Portable fallback: uses exclusive file creation as the locking primitive."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
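# A minimal usage sketch of the platform alias chosen above (hypothetical
# paths; any writable location works):
#   with FileLock("demo.txt.lock", timeout=5):
#       with open("demo.txt", "a") as f:
#           f.write("exclusive access\n")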
| 330 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ernie-m-base""": 5_1_4,
"""ernie-m-large""": 5_1_4,
}
PRETRAINED_INIT_CONFIGURATION = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for ERNIE-M."""

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if text is None:
return None
__lowerCAmelCase = self.tokenize(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase = """""", []
for i, ch in enumerate(__SCREAMING_SNAKE_CASE ):
if ch in self.SP_CHAR_MAPPING:
__lowerCAmelCase = self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = unicodedata.normalize("""NFKC""",__SCREAMING_SNAKE_CASE )
if self.is_whitespace(__SCREAMING_SNAKE_CASE ):
continue
normalized_text += ch
char_mapping.extend([i] * len(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
__lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__lowerCAmelCase = token[1:]
__lowerCAmelCase = text[offset:].index(__SCREAMING_SNAKE_CASE ) + offset
__lowerCAmelCase = start + len(__SCREAMING_SNAKE_CASE )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__lowerCAmelCase = end
return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePiece objects are not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) for c in text) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=64,__SCREAMING_SNAKE_CASE=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__lowerCAmelCase = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__lowerCAmelCase = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__lowerCAmelCase = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__lowerCAmelCase = self.sp_model.EncodeAsPieces(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = []
for pi, piece in enumerate(__SCREAMING_SNAKE_CASE ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(__SCREAMING_SNAKE_CASE ) and pi != 0:
new_pieces.append(__SCREAMING_SNAKE_CASE )
continue
else:
continue
__lowerCAmelCase = 0
for i, chunk in enumerate(__SCREAMING_SNAKE_CASE ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(__SCREAMING_SNAKE_CASE ) or self.is_punct(__SCREAMING_SNAKE_CASE ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__lowerCAmelCase = i
if len(__SCREAMING_SNAKE_CASE ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE,""" """ ).strip()
return out_string
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """""".join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE,""" """ ).strip()
return out_string
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.vocab.get(__SCREAMING_SNAKE_CASE,self.vocab.get(self.unk_token ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.reverse_vocab.get(__SCREAMING_SNAKE_CASE,self.unk_token )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
__lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(__SCREAMING_SNAKE_CASE ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(__SCREAMING_SNAKE_CASE ) + 1) + [1] * (len(__SCREAMING_SNAKE_CASE ) + 3)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(__SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase = unicodedata.category(__SCREAMING_SNAKE_CASE )
if cat == "Zs":
return True
return False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = {}
with io.open(__SCREAMING_SNAKE_CASE,"""r""",encoding="""utf-8""" ) as f:
for index, line in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = line.rstrip("""\n""" )
__lowerCAmelCase = int(__SCREAMING_SNAKE_CASE )
return token_to_idx
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = 0
if os.path.isdir(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__lowerCAmelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(__SCREAMING_SNAKE_CASE,"""w""",encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items(),key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
""" Please check that the vocabulary is not corrupted!""" )
__lowerCAmelCase = token_index
writer.write(token + """\n""" )
index += 1
__lowerCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE,"""sentencepiece.bpe.model""" )
with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (vocab_file,)
| 46 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # Verify: no edge may connect two vertices of the same color.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
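# A hedged counter-example: a triangle (odd cycle) is not bipartite.
# triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# print(check_bipartite_dfs(triangle))  # False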
| 46 | 1 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
_A : Any = namedtuple('covid_data', 'cases deaths recovered')
def _a ( UpperCAmelCase = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = '''//div[@class = "maincounter-number"]/span/text()'''
return covid_data(*html.fromstring(requests.get(UpperCAmelCase ).content ).xpath(UpperCAmelCase ) )
_A : Dict = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 142 |
def binary_or(a: int, b: int) -> str:
    """Bitwise OR of two non-negative ints, returned as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
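    # Quick check in addition to the doctests: 25 | 32 == 57 == 0b111001.
    print(binary_or(25, 32))  # 0b111001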
| 142 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration of the InstructBLIP vision encoder."""

    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration of the InstructBLIP Q-Former."""

    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """Composite configuration tying together vision encoder, Q-Former and language model."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
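# A minimal construction sketch of the composite config above (all defaults;
# the OPT text config is resolved through CONFIG_MAPPING):
#   cfg = InstructBlipConfig()
#   cfg.to_dict()["model_type"]   # 'instructblip'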
| 218 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """Convert a TensorFlow Transformer-XL checkpoint and/or dataset pickle to PyTorch files."""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
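# Example invocation (hypothetical paths; a TF checkpoint, a pre-processed
# corpus pickle, or both may be supplied):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-converted \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt \
#       --transfo_xl_config_file ./transfo_xl_config.json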
| 218 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 |
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return its binary representation with sign and 0b prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
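    # Quick manual checks in addition to the doctests:
    print(main("7"))    # 0b111
    print(main("-11"))  # -0b1011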
| 18 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in the line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict="" , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Any=None ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(snake_case_ ):
index += 1
lowerCAmelCase_ : Optional[Any] = ["""\n""".join(lines[:index] )]
else:
lowerCAmelCase_ : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ : Any = [lines[index]]
index += 1
while index < len(snake_case_ ) and (end_prompt is None or not lines[index].startswith(snake_case_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(snake_case_ ) )
if index < len(snake_case_ ) - 1:
lowerCAmelCase_ : Any = [lines[index + 1]]
index += 1
else:
lowerCAmelCase_ : List[str] = []
else:
blocks.append('\n'.join(snake_case_ ) )
lowerCAmelCase_ : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case_ ) > 0:
blocks.append('\n'.join(snake_case_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case_ ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wrap a key function so that leading underscores and case are ignored."""
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    """Sort a list of object names: constants first, then classes, then functions."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    """Sorts the imports in a single import statement."""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def UpperCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=True ) -> Union[str, Any]:
"""simple docstring"""
with open(snake_case_ , 'r' ) as f:
lowerCAmelCase_ : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ : Dict = split_code_in_indented_blocks(
snake_case_ , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(snake_case_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ : Optional[Any] = main_blocks[block_idx]
lowerCAmelCase_ : Optional[int] = block.split('\n' )
# Get to the start of the imports.
lowerCAmelCase_ : Union[str, Any] = 0
while line_idx < len(snake_case_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ : List[str] = len(snake_case_ )
else:
line_idx += 1
if line_idx >= len(snake_case_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ : Dict = """\n""".join(block_lines[line_idx:-1] )
lowerCAmelCase_ : Union[str, Any] = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCAmelCase_ : Optional[int] = split_code_in_indented_blocks(snake_case_ , indent_level=snake_case_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ : Union[str, Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ : Union[str, Any] = [(pattern.search(snake_case_ ).groups()[0] if pattern.search(snake_case_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ : Optional[Any] = [(i, key) for i, key in enumerate(snake_case_ ) if key is not None]
lowerCAmelCase_ : List[Any] = [x[0] for x in sorted(snake_case_ , key=lambda lowerCAmelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[Any] = []
for i in range(len(snake_case_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(snake_case_ )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ : Tuple = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(snake_case_ ):
if check_only:
return True
else:
print(f"Overwriting {file}." )
with open(snake_case_ , 'w' ) as f:
f.write('\n'.join(snake_case_ ) )
def UpperCamelCase_ ( lowerCAmelCase__ : int=True ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Any = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
lowerCAmelCase_ : Union[str, Any] = sort_imports(os.path.join(snake_case_ , '__init__.py' ) , check_only=snake_case_ )
if result:
lowerCAmelCase_ : Any = [os.path.join(snake_case_ , '__init__.py' )]
if len(snake_case_ ) > 0:
raise ValueError(f"Would overwrite {len(snake_case_ )} files, run `make style`." )
if __name__ == "__main__":
lowercase__ : Any = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowercase__ : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 355 |
"""simple docstring"""
import baseaa
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> bytes:
"""simple docstring"""
return baseaa.aaaencode(string.encode('utf-8' ) )
def UpperCamelCase_ ( lowerCAmelCase__ : bytes ) -> str:
"""simple docstring"""
return baseaa.aaadecode(lowerCAmelCase__ ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 0 |
__lowerCamelCase : int = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : Union[str, Any] = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowerCAmelCase )
UpperCamelCase : Optional[int] = "".join(bin(_lowerCAmelCase )[2:].zfill(8 ) for byte in data )
UpperCamelCase : int = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase : List[str] = b"=" * ((6 - len(_lowerCAmelCase ) % 6) // 2)
# Pad binary_stream with zero bits so its length becomes a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
UpperCamelCase : List[str] = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def A_ ( _lowerCAmelCase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCamelCase : Optional[int] = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
UpperCamelCase : Union[str, Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
UpperCamelCase : Tuple = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCamelCase : Tuple = encoded_data[:-padding]
UpperCamelCase : Optional[Any] = "".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
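# Each '=' replaced two zero bits added during encoding, so padding * 2 bits are dropped from the end.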
else:
UpperCamelCase : Optional[int] = "".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCamelCase : int = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
def A_ ( _lowerCAmelCase = 50 ) -> int:
UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )]
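# different_colour_ways_number[n][t - 2] counts tilings of a length-n row using at least one tile of length t (t in 2..4); this matches Project Euler problem 116.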
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase: Dict = logging.get_logger(__name__)
lowerCAmelCase: str = '▁'
lowerCAmelCase: List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase: int = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCAmelCase: Union[str, Any] = {
'facebook/xglm-564M': 2_0_4_8,
}
class a__( __UpperCamelCase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : str , __snake_case : Tuple , __snake_case : Optional[int]="<s>" , __snake_case : int="</s>" , __snake_case : Any="</s>" , __snake_case : str="<s>" , __snake_case : List[Any]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Dict , ):
a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
a : Dict = 7
a : List[Any] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
a : int = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
a : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a : Optional[Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
a : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
a : Optional[int] = len(self.sp_model )
a : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_lowerCAmelCase )
a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
a : int = self.__dict__.copy()
a : List[Any] = None
a : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , __snake_case : Tuple ):
a : Tuple = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a : Optional[int] = {}
a : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowercase_ ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
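# The separator is prepended to a single sequence; sequence pairs are joined with a doubled separator.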
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
a : Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def lowercase_ ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase ))
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase ))
def lowercase_ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
a : str = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def lowercase_ ( self : Union[str, Any] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def lowercase_ ( self : str ):
a : Tuple = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : List[Any] , __snake_case : str ):
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def lowercase_ ( self : Any , __snake_case : Tuple ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a : Dict = self.sp_model.PieceToId(_lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : Optional[int] , __snake_case : Optional[int] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self : str , __snake_case : Tuple ):
a : str = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , ' ' ).strip()
return out_string
def lowercase_ ( self : List[str] , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : List[Any] = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , 'wb' ) as fi:
a : Dict = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 370 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
lowerCAmelCase: Optional[int] = parser.parse_args()
lowerCAmelCase: List[Any] = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
lowerCAmelCase: Optional[Any] = CLIPImageProcessor()
lowerCAmelCase: Tuple = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
lowerCAmelCase: List[str] = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 96 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
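# _import_structure maps each submodule to the public names it exports; _LazyModule defers the actual imports until first use.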
a : Optional[int] = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_="divided_space_time", SCREAMING_SNAKE_CASE_=None, ) -> int:
UpperCAmelCase_: Union[str, Any] = parent
UpperCAmelCase_: str = batch_size
UpperCAmelCase_: str = image_size
UpperCAmelCase_: Optional[Any] = num_channels
UpperCAmelCase_: List[Any] = patch_size
UpperCAmelCase_: Optional[Any] = num_frames
UpperCAmelCase_: Any = is_training
UpperCAmelCase_: Union[str, Any] = use_labels
UpperCAmelCase_: Union[str, Any] = hidden_size
UpperCAmelCase_: Tuple = num_hidden_layers
UpperCAmelCase_: Optional[Any] = num_attention_heads
UpperCAmelCase_: Optional[Any] = intermediate_size
UpperCAmelCase_: Tuple = hidden_act
UpperCAmelCase_: Optional[int] = hidden_dropout_prob
UpperCAmelCase_: Tuple = attention_probs_dropout_prob
UpperCAmelCase_: Dict = attention_type
UpperCAmelCase_: List[Any] = initializer_range
UpperCAmelCase_: Union[str, Any] = scope
UpperCAmelCase_: List[Any] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase_: str = (image_size // patch_size) ** 2
UpperCAmelCase_: str = (num_frames) * self.num_patches_per_frame + 1
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_: List[str] = None
if self.use_labels:
UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase_: Tuple = self.get_config()
return config, pixel_values, labels
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: int = TimesformerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
UpperCAmelCase_: Tuple = self.num_labels
return config
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
UpperCAmelCase_: Union[str, Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: Tuple = model(SCREAMING_SNAKE_CASE_ )
# verify the logits shape
UpperCAmelCase_: Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> int:
UpperCAmelCase_: Tuple = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = config_and_inputs
UpperCAmelCase_: Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
A = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: str = TimesformerModelTester(self )
UpperCAmelCase_: Optional[Any] = ConfigTester(
self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_, hidden_size=37 )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: List[str] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def __snake_case (self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def __snake_case (self ) -> Optional[int]:
pass
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase_: Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_, nn.Linear ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Tuple = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: int = [*signature.parameters.keys()]
UpperCAmelCase_: Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> str:
UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Any:
UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> Any:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: Optional[Any] = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
if not self.has_attentions:
pass
else:
UpperCAmelCase_ , UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_: List[Any] = True
for model_class in self.all_model_classes:
UpperCAmelCase_: str = self.model_tester.seq_length
UpperCAmelCase_: Any = self.model_tester.num_frames
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: int = False
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: Any = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = outputs.attentions
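# one attention tensor is returned per hidden layer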
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_: Any = True
UpperCAmelCase_: Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
UpperCAmelCase_: Tuple = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
UpperCAmelCase_: Optional[int] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1, len(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
def __snake_case (self ) -> Dict:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: List[str] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = outputs.hidden_states
UpperCAmelCase_: int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
UpperCAmelCase_ , UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_: Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Tuple = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
UpperCAmelCase_: str = np.load(lowerCAmelCase__ )
return list(lowerCAmelCase__ )
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Tuple:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Optional[Any] = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = self.default_image_processor
UpperCAmelCase_: List[Any] = prepare_video()
UpperCAmelCase_: Optional[Any] = image_processor(video[:8], return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCAmelCase_: Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
| 147 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
_A , _B = 0, 1
while True:
_A , _B = _B, _A + _B
yield _B
def solution(snake_case__ :int = 1_000) -> int:
_A = 1
_B = fibonacci_generator()
while len(str(next(_B))) < snake_case__:
_A += 1
return _A + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 361 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , lowerCAmelCase_ , )
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 81 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a__ : Any = 'sshleifer/bart-tiny-random'
a__ : str = 'patrickvonplaten/t5-tiny-random'
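# Tiny random checkpoints keep these layer-copying tests fast; no pretrained weights are needed.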
@require_torch
class lowercase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return AutoConfig.from_pretrained(a )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=a )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=a )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def __a ( self ):
UpperCamelCase__ , *UpperCamelCase__ = create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def __a ( self ):
with self.assertRaises(a ):
create_student_by_copying_alternating_layers(a , tempfile.mkdtemp() , e=a , d=a )
| 80 |
'''simple docstring'''
a__ : Union[str, Any] = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
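# Precompute the squared-digit sum for every value 0..99999 so next_number can process five digits per step.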
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while number:
# Slightly faster: five digits are processed per iteration.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Every chain eventually ends in either 89 or 1.
# 58 (whose chain ends in 89) is seeded first so the remaining members need
# the fewest iterations; the chain ending in 1 contains only the element 1.
# So 58 and 1 are declared before the search starts.
# A flat array replaces a dictionary to speed up lookups.
CHAINS : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def _UpperCamelCase ( __A ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase__ = chain(next_number(__A ) )
UpperCamelCase__ = number_chain
while number < 10000000:
UpperCamelCase__ = number_chain
number *= 10
return number_chain
def _UpperCamelCase ( __A = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , __A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
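# Under TYPE_CHECKING the real imports run so static analysers see them; at runtime _LazyModule defers them.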
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def UpperCAmelCase_ (__a : list[int] , __a : list[int] ):
"""simple docstring"""
if not len(__a ) == len(__a ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
_a, _a, _a : Tuple = equationa
_a, _a, _a : str = equationa
# Calculate the determinants of the matrices
_a : Union[str, Any] = aa * ba - aa * ba
_a : List[Any] = ca * ba - ca * ba
_a : List[Any] = aa * ca - aa * ca
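# determinant_x and determinant_y come from replacing the x / y coefficient column with the constants column (Cramer's rule).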
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
_a : int = determinant_x / determinant
_a : List[str] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
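# Worked example: for 2x + 3y = 6 and 4x + 5y = 10, Cramer's rule yields (x, y) = (0.0, 2.0).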
| 5 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCAmelCase : str = TypeVar('T')
lowerCAmelCase : Optional[Any] = Union[List[T], Tuple[T, ...]]
lowerCAmelCase : str = Union[T, List[T], Dict[str, T]]
lowerCAmelCase : Union[str, Any] = Union[str, bytes, os.PathLike]
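# Aliases (original names mangled): roughly a list/tuple of T, a nested structure of T, and a path-like union.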
| 253 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__magic_name__)
class _A ( __magic_name__):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
SCREAMING_SNAKE_CASE : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True})
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''text''': Value('''string''')})
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'''labels''': ClassLabel})
SCREAMING_SNAKE_CASE : str = "text"
SCREAMING_SNAKE_CASE : str = "labels"
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , _SCREAMING_SNAKE_CASE ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
SCREAMING_SNAKE_CASE_ : List[Any] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = features[self.label_column]
SCREAMING_SNAKE_CASE_ : List[Any] = label_schema
return task_template
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 253 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = (IPNDMScheduler,)
a_ : List[str] = (("""num_inference_steps""", 50),)
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->List[Any]:
a_ = {"num_train_timesteps": 10_00}
config.update(**__UpperCAmelCase)
return config
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->Dict:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
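# Fake past residuals stand in for the multistep history the scheduler normally accumulates.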
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
if time_step is None:
a_ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self) ->List[str]:
pass
def UpperCAmelCase__ ( self , __UpperCAmelCase=0 , **__UpperCAmelCase) ->List[str]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
if time_step is None:
a_ = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase)
a_ = scheduler_class.from_pretrained(__UpperCAmelCase)
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self , **__UpperCAmelCase) ->Any:
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**__UpperCAmelCase)
a_ = scheduler_class(**__UpperCAmelCase)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
for i, t in enumerate(scheduler.timesteps):
a_ = model(__UpperCAmelCase , __UpperCAmelCase)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase).prev_sample
return sample
def UpperCAmelCase__ ( self) ->Union[str, Any]:
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , __UpperCAmelCase)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**__UpperCAmelCase)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , "set_timesteps"):
scheduler.set_timesteps(__UpperCAmelCase)
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.timesteps[5]
a_ = scheduler.timesteps[6]
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
a_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def UpperCAmelCase__ ( self) ->Union[str, Any]:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.full_loop()
a_ = torch.mean(torch.abs(__UpperCAmelCase))
assert abs(result_mean.item() - 2_54_05_29) < 10
| 303 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : str = """xlm-roberta"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) ->Union[str, Any]:
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = classifier_dropout
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
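# Multiple-choice inputs carry an extra choice axis; everything else is (batch, sequence).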
if self.task == "multiple-choice":
a_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
])
| 303 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[int, Iterable[int]] , lowerCAmelCase__ : bool , lowerCAmelCase__ : int ) -> Tuple[int, int]:
def constraint_to_multiple_of(lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : List[str]=None ):
__a = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
__a = math.floor(val / multiple ) * multiple
if x < min_val:
__a = math.ceil(val / multiple ) * multiple
return x
__a = (output_size, output_size) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else output_size
__a , __a = get_image_size(lowerCAmelCase__ )
__a , __a = output_size
# determine new height and width
__a = output_height / input_height
__a = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
__a = scale_width
else:
# fit height
__a = scale_height
__a = constraint_to_multiple_of(scale_height * input_height , multiple=lowerCAmelCase__ )
__a = constraint_to_multiple_of(scale_width * input_width , multiple=lowerCAmelCase__ )
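# Both output dimensions are snapped to a multiple of the given value after scaling, honouring keep_aspect_ratio above.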
return (new_height, new_width)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = ['pixel_values']
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ):
super().__init__(**_a )
__a = size if size is not None else {'''height''': 384, '''width''': 384}
__a = get_size_dict(_a )
__a = do_resize
__a = size
__a = keep_aspect_ratio
__a = ensure_multiple_of
__a = resample
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ):
__a = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
__a = get_resize_output_image_size(
_a , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_a , multiple=_a , )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a , _a = None , **_a , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a , _a , _a = None , **_a , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(_a )
__a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(_a ) for image in images]
if do_resize:
__a = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
__a = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
__a = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
__a = [to_channel_dimension_format(_a , _a ) for image in images]
__a = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
def __UpperCAmelCase ( self , _a , _a = None ):
__a = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_a ):
__a = target_sizes.numpy()
__a = []
for idx in range(len(_a ) ):
__a = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_a )
__a = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
__a = logits.argmax(dim=1 )
__a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 45 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_UpperCAmelCase : List[str] = 8
def __magic_name__( lowerCamelCase, lowerCamelCase=BITS):
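# Convert images in [0, 1] to per-bit channels in {-1, 1}: c channels become c * BITS channels.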
__lowerCAmelCase = x.device
__lowerCAmelCase = (x * 2_5_5).int().clamp(0, 2_5_5)
__lowerCAmelCase = 2 ** torch.arange(bits - 1, -1, -1, device=lowerCamelCase)
__lowerCAmelCase = rearrange(lowerCamelCase, '''d -> d 1 1''')
__lowerCAmelCase = rearrange(lowerCamelCase, '''b c h w -> b c 1 h w''')
__lowerCAmelCase = ((x & mask) != 0).float()
__lowerCAmelCase = rearrange(lowerCamelCase, '''b c d h w -> b (c d) h w''')
__lowerCAmelCase = bits * 2 - 1
return bits
def __magic_name__( lowerCamelCase, lowerCamelCase=BITS):
__lowerCAmelCase = x.device
__lowerCAmelCase = (x > 0).int()
__lowerCAmelCase = 2 ** torch.arange(bits - 1, -1, -1, device=lowerCamelCase, dtype=torch.intaa)
__lowerCAmelCase = rearrange(lowerCamelCase, '''d -> d 1 1''')
__lowerCAmelCase = rearrange(lowerCamelCase, '''b (c d) h w -> b c d h w''', d=8)
__lowerCAmelCase = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''')
return (dec / 2_5_5).clamp(0.0, 1.0)
def __magic_name__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = 0.0, lowerCamelCase = True, lowerCamelCase=None, lowerCamelCase = True, ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__lowerCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[timestep]
__lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCamelCase, -scale, lowerCamelCase)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__lowerCAmelCase = self._get_variance(lowerCamelCase, lowerCamelCase)
__lowerCAmelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__lowerCAmelCase = model_output.device if torch.is_tensor(lowerCamelCase) else '''cpu'''
__lowerCAmelCase = torch.randn(model_output.shape, dtype=model_output.dtype, generator=lowerCamelCase).to(lowerCamelCase)
__lowerCAmelCase = self._get_variance(lowerCamelCase, lowerCamelCase) ** 0.5 * eta * noise
__lowerCAmelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase, pred_original_sample=lowerCamelCase)
def __magic_name__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase="epsilon", lowerCamelCase=None, lowerCamelCase = True, ):
__lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase = torch.split(lowerCamelCase, sample.shape[1], dim=1)
else:
__lowerCAmelCase = None
# 1. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[t]
__lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase = 1 - alpha_prod_t
__lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""")
# 3. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCamelCase, -scale, lowerCamelCase)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
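        # Formula (7) written out (comment added for clarity):
        #   mu_t(x_t, x_0) = (sqrt(alpha_prod_{t-1}) * beta_t / (1 - alpha_prod_t)) * x_0
        #                    + (sqrt(alpha_t) * (1 - alpha_prod_{t-1}) / (1 - alpha_prod_t)) * x_t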
__lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase = 0
if t > 0:
__lowerCAmelCase = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=lowerCamelCase).to(model_output.device)
__lowerCAmelCase = (self._get_variance(lowerCamelCase, predicted_variance=lowerCamelCase) ** 0.5) * noise
__lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase, pred_original_sample=lowerCamelCase)
class a__ ( __A ):
"""simple docstring"""
def __init__(self , __lowercase , __lowercase , __lowercase = 1.0 , ):
super().__init__()
__lowerCAmelCase = bit_scale
__lowerCAmelCase = (
ddim_bit_scheduler_step if isinstance(__lowercase , __lowercase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=__lowercase , scheduler=__lowercase )
@torch.no_grad()
def __call__(self , __lowercase = 2_56 , __lowercase = 2_56 , __lowercase = 50 , __lowercase = None , __lowercase = 1 , __lowercase = "pil" , __lowercase = True , **__lowercase , ):
__lowerCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=__lowercase , )
__lowerCAmelCase = decimal_to_bits(__lowercase ) * self.bit_scale
__lowerCAmelCase = latents.to(self.device )
self.scheduler.set_timesteps(__lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__lowerCAmelCase = self.unet(__lowercase , __lowercase ).sample
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase = self.scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
__lowerCAmelCase = bits_to_decimal(__lowercase )
if output_type == "pil":
__lowerCAmelCase = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
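# A minimal usage sketch of the pipeline above (added for illustration, not part of the
# original file). It assumes the decimal_to_bits / bits_to_decimal helpers defined earlier
# in this file; the UNet shape reflects bit diffusion's encoding of RGB images as
# 3 * 8 = 24 binary channels. `a__` is the (obfuscated) pipeline class defined above.
#   from diffusers import DDIMScheduler, UNet2DModel
#   unet = UNet2DModel(sample_size=64, in_channels=24, out_channels=24)
#   scheduler = DDIMScheduler(num_train_timesteps=1_000)
#   pipe = a__(unet, scheduler, 1.0)
#   images = pipe(64, 64, 10).images  # height, width, num_inference_steps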
| 174 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[Any] = "convbert"
def __init__( self : str , UpperCamelCase : str=3_05_22 , UpperCamelCase : Any=7_68 , UpperCamelCase : Optional[Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : Optional[int]=30_72 , UpperCamelCase : int="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Optional[int]=5_12 , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=1E-1_2 , UpperCamelCase : List[str]=1 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : int=2 , UpperCamelCase : Optional[Any]=7_68 , UpperCamelCase : Any=2 , UpperCamelCase : Optional[int]=9 , UpperCamelCase : List[Any]=1 , UpperCamelCase : int=None , **UpperCamelCase : Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , )
lowerCAmelCase__ : Optional[Any] = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Any = max_position_embeddings
lowerCAmelCase__ : Dict = type_vocab_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : Optional[int] = embedding_size
lowerCAmelCase__ : List[Any] = head_ratio
lowerCAmelCase__ : List[Any] = conv_kernel_size
lowerCAmelCase__ : Dict = num_groups
lowerCAmelCase__ : int = classifier_dropout
class _lowerCamelCase ( a_ ):
@property
def _lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCAmelCase__ : int = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase__ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
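# A minimal usage sketch (added for illustration). It assumes the public transformers API
# that the obfuscated classes above correspond to (ConvBertConfig / ConvBertModel); the
# keyword values below are the same defaults as in __init__ above.
from transformers import ConvBertConfig, ConvBertModel

convbert_config = ConvBertConfig(head_ratio=2, conv_kernel_size=9, num_groups=1)
convbert_model = ConvBertModel(convbert_config)
print(convbert_config.embedding_size)  # 768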
| 212 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A = """base_with_context"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
lowerCAmelCase__ : int = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ : str = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : str = ly_weight["""attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ : int = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Any = ly_weight["""attention"""]
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCAmelCase__ : List[Any] = weights[f"""layers_{lyr_num}"""]
lowerCAmelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Tuple = ly_weight["""self_attention"""]
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : List[Any] = ly_weight["""MultiHeadDotProductAttention_0"""]
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowerCAmelCase__ : str = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
lowerCAmelCase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def lowercase_ ( __UpperCAmelCase ) -> str:
lowerCAmelCase__ : Optional[int] = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCAmelCase__ : Optional[int] = jnp.tree_util.tree_map(onp.array , __UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = [
"""from __gin__ import dynamic_registration""",
"""from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
"""diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
"""diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
]
lowerCAmelCase__ : Dict = os.path.join(args.checkpoint_path , """..""" , """config.gin""" )
lowerCAmelCase__ : Tuple = inference.parse_training_gin_file(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Any = inference.InferenceModel(args.checkpoint_path , __UpperCAmelCase )
lowerCAmelCase__ : List[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" )
lowerCAmelCase__ : List[Any] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
lowerCAmelCase__ : List[str] = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , )
lowerCAmelCase__ : Optional[int] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCAmelCase__ : Optional[Any] = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : List[str] = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __UpperCAmelCase )
lowerCAmelCase__ : Any = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" )
lowerCAmelCase__ : Optional[Any] = SpectrogramDiffusionPipeline(
notes_encoder=__UpperCAmelCase , continuous_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase , scheduler=__UpperCAmelCase , melgan=__UpperCAmelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
_A = parser.parse_args()
main(args)
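# Example invocation (added for illustration; the script filename and paths are assumptions):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion \
#       --save True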
| 212 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=None , ) -> Any:
lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_center_crop
lowerCAmelCase = crop_size
def _snake_case ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = MobileNetVaImageProcessor if is_vision_available() else None
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def _snake_case ( self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , """do_resize""" ) )
self.assertTrue(hasattr(lowercase , """size""" ) )
self.assertTrue(hasattr(lowercase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowercase , """crop_size""" ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _snake_case ( self ) -> Dict:
pass
def _snake_case ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self ) -> Tuple:
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase = image_processing(lowercase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
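# To run this suite standalone (illustrative module path; adjust to the repository layout):
#   python -m pytest tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py -q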
| 46 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False
@dataclass
class lowercase :
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = None
# Automatically constructed
_SCREAMING_SNAKE_CASE = "dict"
_SCREAMING_SNAKE_CASE = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
_SCREAMING_SNAKE_CASE = field(default='Audio' , init=_UpperCAmelCase , repr=_UpperCAmelCase )
def __call__( self ) -> Union[str, Any]:
return self.pa_type
def _snake_case ( self , lowercase ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(lowercase , lowercase ):
return {"bytes": None, "path": value}
elif isinstance(lowercase , lowercase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowerCAmelCase = BytesIO()
sf.write(lowercase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
                    # To convert raw "PCM" bytes to "WAV" bytes, the sampling rate must be known
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
                    # If we already have the PCM bytes, we don't need to re-read the file (just use them!)
lowerCAmelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCAmelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCAmelCase = BytesIO(bytes() )
sf.write(lowercase , lowercase , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _snake_case ( self , lowercase , lowercase = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
lowerCAmelCase , lowerCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
lowerCAmelCase = xsplitext(lowercase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
lowerCAmelCase = token_per_repo_id or {}
lowerCAmelCase = path.split("""::""" )[-1]
try:
lowerCAmelCase = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""]
lowerCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowerCAmelCase = None
with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
else:
lowerCAmelCase , lowerCAmelCase = sf.read(lowercase )
lowerCAmelCase = array.T
if self.mono:
lowerCAmelCase = librosa.to_mono(lowercase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowerCAmelCase = librosa.resample(lowercase , orig_sr=lowercase , target_sr=self.sampling_rate )
lowerCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def _snake_case ( self , lowercase ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
lowerCAmelCase = pa.array([Audio().encode_example(lowercase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowerCAmelCase = storage.field("""bytes""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowerCAmelCase = storage.field("""path""" )
else:
lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(lowercase , self.pa_type )
def _snake_case ( self , lowercase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
lowerCAmelCase = f.read()
return bytes_
lowerCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCAmelCase = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
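# A minimal usage sketch of the public API this feature backs (added for illustration;
# the file path is a placeholder):
#   from datasets import Audio, Dataset
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   sample = ds[0]["audio"]  # decoded on access: {"path", "array", "sampling_rate"}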
| 46 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowercase__ : Tuple = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
lowercase__ : int = '''\
WIKI_SPLIT is the combination of three metrics: SARI, exact match (EXACT), and SacreBLEU.
It can be used to evaluate the quality of machine-generated texts.
'''
lowercase__ : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def _lowerCAmelCase ( __snake_case : List[str] ) -> List[str]:
def remove_articles(__snake_case : str ):
__A : Union[str, Any] = re.compile(r'\b(a|an|the)\b' , re.UNICODE )
return re.sub(A__ , ' ' , A__ )
def white_space_fix(__snake_case : int ):
return " ".join(text.split() )
def remove_punc(__snake_case : List[Any] ):
__A : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case : Optional[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
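# e.g. normalize_answer("The Cat, sat!") -> "cat sat"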
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : Any ) -> str:
return int(normalize_answer(A__ ) == normalize_answer(A__ ) )
def _lowerCAmelCase ( __snake_case : Dict , __snake_case : Any ) -> Tuple:
__A : Optional[Any] = [any(compute_exact(A__ , A__ ) for ref in refs ) for pred, refs in zip(A__ , A__ )]
return (sum(A__ ) / len(A__ )) * 1_00
def _lowerCAmelCase ( __snake_case : Tuple , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : int ) -> Union[str, Any]:
__A : Any = [rgram for rgrams in rgramslist for rgram in rgrams]
__A : Tuple = Counter(A__ )
__A : str = Counter(A__ )
__A : Tuple = Counter()
for sgram, scount in sgramcounter.items():
__A : Tuple = scount * numref
__A : str = Counter(A__ )
__A : Any = Counter()
for cgram, ccount in cgramcounter.items():
__A : Optional[Any] = ccount * numref
# KEEP
__A : Optional[Any] = sgramcounter_rep & cgramcounter_rep
__A : Tuple = keepgramcounter_rep & rgramcounter
__A : Dict = sgramcounter_rep & rgramcounter
__A : int = 0
__A : Optional[int] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__A : Optional[Any] = 1
__A : Optional[int] = 1
if len(A__ ) > 0:
__A : List[str] = keeptmpscorea / len(A__ )
if len(A__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__A : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__A : int = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__A : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__A : int = sgramcounter_rep - cgramcounter_rep
__A : str = delgramcounter_rep - rgramcounter
__A : List[Any] = sgramcounter_rep - rgramcounter
__A : Tuple = 0
__A : Optional[int] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__A : Optional[Any] = 1
if len(A__ ) > 0:
__A : List[str] = deltmpscorea / len(A__ )
# ADDITION
__A : Optional[int] = set(A__ ) - set(A__ )
__A : Tuple = set(A__ ) & set(A__ )
__A : Tuple = set(A__ ) - set(A__ )
__A : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__A : Union[str, Any] = 1
__A : List[Any] = 1
if len(A__ ) > 0:
__A : Union[str, Any] = addtmpscore / len(A__ )
if len(A__ ) > 0:
__A : List[str] = addtmpscore / len(A__ )
__A : str = 0
if addscore_precision > 0 or addscore_recall > 0:
__A : List[Any] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def _lowerCAmelCase ( __snake_case : str , __snake_case : List[str] , __snake_case : Tuple ) -> Optional[int]:
__A : Optional[Any] = len(A__ )
__A : Union[str, Any] = ssent.split(' ' )
__A : Tuple = csent.split(' ' )
__A : Optional[int] = []
__A : List[str] = []
__A : Optional[Any] = []
__A : Union[str, Any] = []
__A : Optional[Any] = []
__A : Tuple = []
__A : Optional[int] = []
__A : Tuple = []
__A : Dict = []
__A : Union[str, Any] = []
for rsent in rsents:
__A : int = rsent.split(' ' )
__A : Dict = []
__A : int = []
__A : Dict = []
ragramslist.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
__A : Optional[Any] = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(A__ )
if i < len(A__ ) - 2:
__A : Optional[int] = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(A__ )
if i < len(A__ ) - 3:
__A : Dict = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(A__ )
ragramslist.append(A__ )
ragramslist.append(A__ )
ragramslist.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
__A : Optional[Any] = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(A__ )
if i < len(A__ ) - 2:
__A : str = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(A__ )
if i < len(A__ ) - 3:
__A : Union[str, Any] = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
__A : Tuple = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(A__ )
if i < len(A__ ) - 2:
__A : List[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(A__ )
if i < len(A__ ) - 3:
__A : List[Any] = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(A__ )
((__A) ,(__A) ,(__A)) : Any = SARIngram(A__ , A__ , A__ , A__ )
((__A) ,(__A) ,(__A)) : Union[str, Any] = SARIngram(A__ , A__ , A__ , A__ )
((__A) ,(__A) ,(__A)) : Optional[Any] = SARIngram(A__ , A__ , A__ , A__ )
((__A) ,(__A) ,(__A)) : Dict = SARIngram(A__ , A__ , A__ , A__ )
__A : Optional[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__A : Dict = sum([delascore, delascore, delascore, delascore] ) / 4
__A : Dict = sum([addascore, addascore, addascore, addascore] ) / 4
__A : List[str] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
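# In symbols: SARI = (F1_keep + P_del + F1_add) / 3, where each component is first averaged
# over the n-gram orders n = 1..4 (the four SARIngram calls above); "del" uses precision only.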
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : Optional[int] = True , __snake_case : Optional[Any] = "13a" , __snake_case : List[Any] = True ) -> List[str]:
if lowercase:
__A : Optional[Any] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
__A : int = sacrebleu.metrics.bleu._get_tokenizer(A__ )()(A__ )
else:
__A : str = sacrebleu.TOKENIZERS[tokenizer]()(A__ )
elif tokenizer == "moses":
__A : List[str] = sacremoses.MosesTokenizer().tokenize(A__ , return_str=A__ , escape=A__ )
elif tokenizer == "penn":
__A : Tuple = sacremoses.MosesTokenizer().penn_tokenize(A__ , return_str=A__ )
else:
__A : Any = sentence
if not return_str:
__A : int = normalized_sent.split()
return normalized_sent
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : str , __snake_case : Union[str, Any] ) -> Dict:
if not (len(A__ ) == len(A__ ) == len(A__ )):
raise ValueError('Sources length must match predictions and references lengths.' )
__A : Optional[int] = 0
for src, pred, refs in zip(A__ , A__ , A__ ):
sari_score += SARIsent(normalize(A__ ) , normalize(A__ ) , [normalize(A__ ) for sent in refs] )
__A : Any = sari_score / len(A__ )
return 1_00 * sari_score
def _lowerCAmelCase ( __snake_case : int , __snake_case : Optional[int] , __snake_case : List[Any]="exp" , __snake_case : List[str]=None , __snake_case : str=False , __snake_case : int=False , __snake_case : int=False , ) -> Tuple:
__A : str = len(references[0] )
if any(len(A__ ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__A : Tuple = [[refs[i] for refs in references] for i in range(A__ )]
__A : Tuple = sacrebleu.corpus_bleu(
A__ , A__ , smooth_method=A__ , smooth_value=A__ , force=A__ , lowercase=A__ , use_effective_order=A__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Sequence(datasets.Value('string' , id='sequence') , id='references'),
}) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = {}
result.update({'sari': compute_sari(sources=lowercase__ , predictions=lowercase__ , references=lowercase__)})
result.update({'sacrebleu': compute_sacrebleu(predictions=lowercase__ , references=lowercase__)})
result.update({'exact': compute_em(predictions=lowercase__ , references=lowercase__)})
        return result
| 358 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
    )
| 190 | 0 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def UpperCamelCase_( _snake_case : List[Any] ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def UpperCamelCase_( _snake_case : Dict , _snake_case : Tuple ):
"""simple docstring"""
return (-y * np.log(_snake_case ) - (1 - y) * np.log(1 - h )).mean()
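# Cross-entropy cost in symbols (comment added for clarity):
#   J(theta) = -(1/m) * sum_i [ y_i * log(h_i) + (1 - y_i) * log(1 - h_i) ]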
def UpperCamelCase_( _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Dict ):
"""simple docstring"""
__a =np.dot(_snake_case , _snake_case )
return np.sum(y * scores - np.log(1 + np.exp(_snake_case ) ) )
def UpperCamelCase_( _snake_case : str , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : List[str]=70000 ):
"""simple docstring"""
__a =np.zeros(x.shape[1] )
for iterations in range(_snake_case ):
__a =np.dot(_snake_case , _snake_case )
__a =sigmoid_function(_snake_case )
__a =np.dot(x.T , h - y ) / y.size
__a =theta - alpha * gradient # updating the weights
__a =np.dot(_snake_case , _snake_case )
__a =sigmoid_function(_snake_case )
__a =cost_function(_snake_case , _snake_case )
if iterations % 100 == 0:
print(F'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
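# Batch gradient-descent update performed in the loop above, in symbols:
#   theta <- theta - alpha * (1/m) * X^T (sigmoid(X @ theta) - y)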
if __name__ == "__main__":
_lowerCAmelCase : int = datasets.load_iris()
_lowerCAmelCase : Tuple = iris.data[:, :2]
_lowerCAmelCase : List[str] = (iris.target != 0) * 1
_lowerCAmelCase : Dict = 0.1
_lowerCAmelCase : int = logistic_reg(alpha, x, y, max_iterations=70_000)
print("theta: ", theta) # printing the theta i.e our weights vector
def UpperCamelCase_( _snake_case : int ):
"""simple docstring"""
return sigmoid_function(
np.dot(_snake_case , _snake_case ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((_lowerCAmelCase) , (_lowerCAmelCase)) : int = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) : List[str] = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) : Union[str, Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase : Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase : List[str] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 218 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_lowerCAmelCase : str = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase : List[Any] = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase : str = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
_lowerCAmelCase : List[str] = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
_lowerCAmelCase : Optional[Any] = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
_lowerCAmelCase : List[str] = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def UpperCamelCase_( _snake_case : Tuple , _snake_case : List[str] , _snake_case : Any , _snake_case : str , _snake_case : Union[str, Any]=False ):
"""simple docstring"""
__a =checkpoint[F'{old_prefix}.in_layers.0.weight']
__a =checkpoint[F'{old_prefix}.in_layers.0.bias']
__a =checkpoint[F'{old_prefix}.in_layers.2.weight']
__a =checkpoint[F'{old_prefix}.in_layers.2.bias']
__a =checkpoint[F'{old_prefix}.emb_layers.1.weight']
__a =checkpoint[F'{old_prefix}.emb_layers.1.bias']
__a =checkpoint[F'{old_prefix}.out_layers.0.weight']
__a =checkpoint[F'{old_prefix}.out_layers.0.bias']
__a =checkpoint[F'{old_prefix}.out_layers.3.weight']
__a =checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
__a =checkpoint[F'{old_prefix}.skip_connection.weight']
__a =checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def UpperCamelCase_( _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[int]=None ):
"""simple docstring"""
__a , __a , __a =checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
__a , __a , __a =checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
__a =checkpoint[F'{old_prefix}.norm.weight']
__a =checkpoint[F'{old_prefix}.norm.bias']
__a =weight_q.squeeze(-1 ).squeeze(-1 )
__a =bias_q.squeeze(-1 ).squeeze(-1 )
__a =weight_k.squeeze(-1 ).squeeze(-1 )
__a =bias_k.squeeze(-1 ).squeeze(-1 )
__a =weight_v.squeeze(-1 ).squeeze(-1 )
__a =bias_v.squeeze(-1 ).squeeze(-1 )
__a =(
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
)
__a =checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase_( _snake_case : str , _snake_case : Tuple ):
"""simple docstring"""
__a =torch.load(_snake_case , map_location='cpu' )
__a ={}
__a =checkpoint['time_embed.0.weight']
__a =checkpoint['time_embed.0.bias']
__a =checkpoint['time_embed.2.weight']
__a =checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
__a =checkpoint['label_emb.weight']
__a =checkpoint['input_blocks.0.0.weight']
__a =checkpoint['input_blocks.0.0.bias']
__a =unet_config['down_block_types']
__a =unet_config['layers_per_block']
__a =unet_config['attention_head_dim']
__a =unet_config['block_out_channels']
__a =1
__a =channels_list[0]
for i, layer_type in enumerate(_snake_case ):
__a =channels_list[i]
__a =current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_snake_case ):
__a =F'down_blocks.{i}.resnets.{j}'
__a =F'input_blocks.{current_layer}.0'
__a =True if j == 0 and downsample_block_has_skip else False
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_snake_case ):
__a =F'down_blocks.{i}.resnets.{j}'
__a =F'input_blocks.{current_layer}.0'
__a =True if j == 0 and downsample_block_has_skip else False
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
__a =F'down_blocks.{i}.attentions.{j}'
__a =F'input_blocks.{current_layer}.1'
__a =convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'down_blocks.{i}.downsamplers.0'
__a =F'input_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
__a =current_channels
# hardcoded the mid-block for now
__a ='mid_block.resnets.0'
__a ='middle_block.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a ='mid_block.attentions.0'
__a ='middle_block.1'
__a =convert_attention(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
__a ='mid_block.resnets.1'
__a ='middle_block.2'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a =0
__a =unet_config['up_block_types']
for i, layer_type in enumerate(_snake_case ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__a =F'up_blocks.{i}.resnets.{j}'
__a =F'output_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'up_blocks.{i}.upsamplers.0'
__a =F'output_blocks.{current_layer-1}.1'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__a =F'up_blocks.{i}.resnets.{j}'
__a =F'output_blocks.{current_layer}.0'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case , has_skip=_snake_case )
__a =F'up_blocks.{i}.attentions.{j}'
__a =F'output_blocks.{current_layer}.1'
__a =convert_attention(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
current_layer += 1
if i != len(_snake_case ) - 1:
__a =F'up_blocks.{i}.upsamplers.0'
__a =F'output_blocks.{current_layer-1}.2'
__a =convert_resnet(_snake_case , _snake_case , _snake_case , _snake_case )
__a =checkpoint['out.0.weight']
__a =checkpoint['out.0.bias']
__a =checkpoint['out.2.weight']
__a =checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = strabool(args.class_cond)
_lowerCAmelCase : Dict = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCAmelCase : Tuple = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : Optional[int] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCAmelCase : int = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCAmelCase : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCAmelCase : int = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCAmelCase : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
_lowerCAmelCase : Any = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCAmelCase : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
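    # Example invocation (added for illustration; script and checkpoint names are assumptions —
    # the "cd"/"ct" prefix and dataset tag in the filename select the configs above):
    #   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-imagenet64 --class_cond True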
| 218 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting `slice_size` to `None` disables attention slicing again
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Segment the region described by `text` with CLIPSeg to build the inpainting mask.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
        )
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'timm_backbone'

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
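# A minimal usage sketch (hypothetical backbone name; any model identifier
# accepted by `timm.create_model` should work the same way):
#
# config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))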
| 319 | 0 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__lowercase = NewType("""DataClass""", Any)
__lowercase = NewType("""DataClassType""", Any)
def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs,
) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)

        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

        return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
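# A minimal usage sketch (hypothetical dataclass; upstream this parser is used
# identically via `transformers.HfArgumentParser`):
#
# @dataclasses.dataclass
# class TrainingArguments:
#     learning_rate: float = 3e-5
#     fp16: bool = False
#
# parser = HfArgumentParser(TrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses()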
| 40 | """simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        d_0 = 0.0
        d_1 = 0.0
        for i in range(len(sample)):
            d_0 += math.pow((sample[i] - weights[0][i]), 2)
            d_1 += math.pow((sample[i] - weights[1][i]), 2)
        # The winner is the unit whose weight vector is closest to the sample.
        return 0 if d_0 < d_1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
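# Note: with only two competing units, get_winner() reduces to a nearest-
# centroid rule and update() moves the winner a fraction alpha towards each
# sample, so after a few epochs each weight vector approximates the mean of
# the samples it keeps winning.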
# running the main() function
if __name__ == "__main__":
main()
| 289 | 0 |
'''simple docstring'''
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
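# Expected output: [0, 1, 6, 15, 28] for length 5, and the first ten
# hexagonal numbers [0, 1, 6, 15, 28, 45, 66, 91, 120, 153] for length 10.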
| 280 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')


if __name__ == "__main__":
    main()
| 280 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """simple docstring"""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        """simple docstring"""
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 18 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
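# Quick sanity check (hand-computed, not part of the original script): for
# f(x) = x^2 on [0, 1] with steps = 10 (h = 0.1) the exact integral is 1/3.
# Note that make_points() below stops at x < b - h, so the last interior
# point 0.9 is skipped and this script prints y = 0.254 rather than the
# full trapezoidal estimate 0.335.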
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()

| 96 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
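    # Note: scale_features() and scale_to_features() are linear inverses on the
    # clipped range, so a mel feature x in [log(1e-5), 4.0] satisfies
    # scale_to_features(scale_features(x, output_range=[-1, 1]), input_range=[-1, 1]) == x,
    # which is what lets the denoising loop below operate entirely in [-1, 1].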
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
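# A minimal usage sketch (checkpoint id from the Hugging Face Hub; the MIDI
# preprocessing that produces `input_tokens` lives outside this file, so treat
# the snippet as illustrative):
#
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
# audio = output.audios[0]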
| 333 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 333 | 1 |
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
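# For the default nth = 10001 this returns 104743, the documented answer to
# Project Euler problem 7.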
if __name__ == "__main__":
print(F"""{solution() = }""")
| 75 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    """simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()

| 81 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : int
A : TreeNode | None = None
A : TreeNode | None = None
lowercase_ = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
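# Minimal check (hypothetical three-node tree): the root holds all 3 coins
# and must send one to each empty child, so 2 moves are required.
#
# example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
# assert distribute_coins(example_tree) == 2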
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
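# Example invocation (hypothetical paths; the flags are the ones defined just
# above):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./clap_checkpoint.pt \
#       --pytorch_dump_folder_path ./clap-converted \
#       --enable_fusion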
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
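# The lazy pattern above keeps importing this package cheap: the torch-backed
# modeling file is only loaded when one of the exported names is first
# accessed, e.g.
#
# from transformers import TimeSeriesTransformerConfig  # no torch import yet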
| 5 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """simple docstring"""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """simple docstring"""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """simple docstring"""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
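# For the default order = 10 this sums the first incorrect terms (FITs) of
# the ten fitted polynomials; the documented answer to Project Euler
# problem 101 is 37076114526.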
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """simple docstring"""
    with open(init_file, """r""", encoding="""utf-8""", newline="""\n""") as f:
_snake_case : Any = f.readlines()
_snake_case : List[Any] = 0
while line_index < len(snake_case_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case_ ):
return None
# First grab the objects without a specific backend in _import_structure
_snake_case : List[Any] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_snake_case : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case_ ):
_snake_case : Tuple = _re_one_line_import_struct.search(snake_case_ ).groups()[0]
_snake_case : Union[str, Any] = re.findall("""\[([^\]]+)\]""" , snake_case_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_snake_case : Union[str, Any] = _re_import_struct_key_value.search(snake_case_ )
if single_line_import_search is not None:
_snake_case : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_snake_case : List[str] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_snake_case : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_snake_case : Union[str, Any] = lines[line_index]
if _re_import_struct_add_one.search(snake_case_ ) is not None:
objects.append(_re_import_struct_add_one.search(snake_case_ ).groups()[0] )
elif _re_import_struct_add_many.search(snake_case_ ) is not None:
_snake_case : Any = _re_import_struct_add_many.search(snake_case_ ).groups()[0].split(""", """ )
_snake_case : List[Any] = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_between_brackets.search(snake_case_ ) is not None:
_snake_case : Any = _re_between_brackets.search(snake_case_ ).groups()[0].split(""", """ )
_snake_case : Any = [obj[1:-1] for obj in imports if len(snake_case_ ) > 0]
objects.extend(snake_case_ )
elif _re_quote_object.search(snake_case_ ) is not None:
objects.append(_re_quote_object.search(snake_case_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_snake_case : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_snake_case : Optional[Any] = []
while (
line_index < len(snake_case_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_snake_case : Union[str, Any] = lines[line_index]
_snake_case : Any = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_snake_case : List[str] = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case_ ):
# If the line is an if is_backend_available, we grab all objects associated.
_snake_case : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_snake_case : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_snake_case : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_snake_case : str = lines[line_index]
_snake_case : Dict = _re_import.search(snake_case_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_snake_case : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
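# For orientation (hedged; backends and object names are illustrative): on a
# typical init the function above returns two dicts keyed by backend name,
#     ({"none": [...], "torch": [...]}, {"none": [...], "torch": [...]})
# where the first comes from `_import_structure` and the second from the
# `if TYPE_CHECKING` branch, so the checker below can diff them key by key.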
def analyze_results(import_dict_objects , type_hint_objects ):
    """simple docstring"""
    def find_duplicates(seq ):
        return [k for k, v in collections.Counter(seq ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = """base imports""" if key == """none""" else F"{key} backend"
            errors.append(F"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
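# Minimal hedged doctest for the checker above (toy inputs, not from the repo):
#     >>> analyze_results({"none": ["A", "B"]}, {"none": ["A", "B"]})
#     []
#     >>> analyze_results({"none": ["A"]}, {"none": ["A", "B"]})[0]
#     'Differences for base imports:'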
def check_all_inits():
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , """__init__.py""" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("""\n""".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("""\n\n""".join(failures ) )
def get_transformers_submodules():
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("""_""" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("""*.py""" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , """.""" )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
            if len(submodule.split(""".""" ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def check_submodules():
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        """transformers""" , os.path.join(PATH_TO_TRANSFORMERS , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = """\n""".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            F"{list_of_modules}\n"
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 368 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
A_ = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"Loading PyTorch weights from {pt_path}" )
        pt_state_dict = torch.load(pt_path , map_location="""cpu""" )
        logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ):
    """simple docstring"""
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""scale""",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""mean""",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""var""",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""embedding""",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""kernel""",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""weight""",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("""bias""",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + """_g"""
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + """_v"""
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
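# Hedged illustration of the renaming rules above (names invented for the
# example): a 2D PyTorch linear weight ("dense", "weight") becomes the Flax
# kernel ("dense", "kernel") with the tensor transposed:
#     >>> key, tensor = rename_key_and_reshape_tensor(
#     ...     ("dense", "weight"), np.zeros((4, 3)), {("dense", "kernel"): np.zeros((3, 4))}, "model")
#     >>> (key, tensor.shape)
#     (('dense', 'kernel'), (3, 4))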
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model ):
    """simple docstring"""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix
    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["""params"""]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )
    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["""batch_stats"""] )
        random_flax_state_dict.update(flax_batch_stats )
    flax_state_dict = {}
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
    )
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split(""".""" ) )
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
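# Hedged sketch of the expected call site (mirrors the unsharded branch of
# `load_pytorch_checkpoint_in_flax_state_dict` above; the file name is only
# illustrative):
#     pt_state_dict = torch.load("pytorch_model.bin", map_location="cpu")
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)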
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames , flax_model ):
    """simple docstring"""
    import torch
    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
        model_prefix = flax_model.base_model_prefix
        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["""params"""]
            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )
        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split(""".""" ) )
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]
            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key
            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model(model , flax_checkpoint_path ):
    """simple docstring"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
    # import correct flax class
    flax_cls = getattr(transformers , """Flax""" + model.__class__.__name__ )
    # load flax weight dict
    with open(flax_checkpoint_path , """rb""" ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
    )
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and """.""".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and """.""".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_mean""",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("""running_var""",)
        if "batch_stats" in flax_state:
            flax_key = """.""".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = """.""".join(flax_key_tuple )
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(""".""" )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + """_g"""
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + """_v"""
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = """.""".join(key_components )
                special_pt_names[key_to_check] = key
        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
    if len(missing_keys ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
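# Hedged usage sketch (the model class is a stand-in; any architecture with a
# Flax twin works the same way):
#     from transformers import BertModel
#     pt_model = BertModel.from_pretrained("bert-base-cased")
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")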
| 132 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    '''simple docstring'''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ):
return self.length
    def __getitem__( self , i ):
return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU( torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel( torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
            self.first_batch = False
        return x * self.a + self.b
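# Hedged sketch: these tiny modules exist so distributed tests can observe dtype
# handling; the first forward call prints the parameter and input dtypes:
#     model = RegressionModel(a=1, b=2)
#     _ = model(torch.randn(4))  # prints "Model dtype: ... Input dtype: ..."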
def get_dataloaders( accelerator , batch_size = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv' , data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None , padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['sentence1', 'sentence2', 'label'] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader | 96 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_flax_xlm_roberta_base( self ):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['last_hidden_state']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) ) | 282 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = """▁"""
lowercase__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowercase__ = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
lowercase__ = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
lowercase__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBart50Tokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__( self , vocab_file , src_lang=None , tgt_lang=None , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
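    # Hedged illustration of the fairseq offset at work (ids are indicative, not
    # asserted): the four hand-mapped specials resolve through
    # `fairseq_tokens_to_ids`, while language codes land after the spm vocab:
    #     tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
    #     tok.convert_tokens_to_ids("<s>")    # 0 (fairseq map)
    #     tok.convert_tokens_to_ids("en_XX")  # sp_model_size + fairseq_offset + index of en_XX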
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index : int ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en_XX" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro_RO" , **kwargs , ):
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang : str ):
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang : str ):
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
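# Hedged usage sketch for the tokenizer above (checkpoint name taken from the
# PRETRAINED_VOCAB_FILES_MAP earlier in this file):
#     tok = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#     enc = tok("UN Chief Says There Is No Military Solution in Syria")
#     # enc["input_ids"] starts with the en_XX language-code id and ends with </s>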
| 366 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
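# Hedged sketch of how the tester is consumed (mirrors the test class below):
#     tester = MobileNetV1ImageProcessingTester(parent=None)
#     processor = MobileNetV1ImageProcessor(**tester.prepare_image_processor_dict())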
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "crop_size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 20} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase ( self : Tuple ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 161 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env( key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1 # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value | 212 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ) -> np.array:
    ar = F'''{sampling_rate}'''
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "f32le" , ) -> Dict:
lowerCAmelCase__ : Optional[Any] = F'''{sampling_rate}'''
lowerCAmelCase__ : Any = '1'
if format_for_conversion == "s16le":
lowerCAmelCase__ : Dict = 2
elif format_for_conversion == "f32le":
lowerCAmelCase__ : List[str] = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowerCAmelCase__ : Tuple = platform.system()
if system == "Linux":
lowerCAmelCase__ : str = 'alsa'
lowerCAmelCase__ : str = 'default'
elif system == "Darwin":
lowerCAmelCase__ : Any = 'avfoundation'
lowerCAmelCase__ : Tuple = ':0'
elif system == "Windows":
lowerCAmelCase__ : Any = 'dshow'
lowerCAmelCase__ : int = 'default'
lowerCAmelCase__ : Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowerCAmelCase__ : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowerCAmelCase__ : str = _ffmpeg_stream(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for item in iterator:
yield item
def ffmpeg_microphone_live( sampling_rate , chunk_length_s , stream_chunk_s = None , stride_length_s = None , format_for_conversion = "f32le" , ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate , chunk_s , format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s , (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone , chunk_len , stride=(stride_left, stride_right) , stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'] , dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ):
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
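# Hedged illustration of the chunking contract above: with chunk_len=4 and
# stride=(1, 1), feeding [b"abcdefg"] (stream=False) yields overlapping windows
#     {"raw": b"abcd", "stride": (0, 1)}, {"raw": b"cdef", "stride": (1, 1)},
#     {"raw": b"efg",  "stride": (1, 0)}
# so downstream consumers can drop the strided edges when stitching results.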
def _ffmpeg_stream( ffmpeg_command , buflen ):
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 212 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines : List[str] ) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(R'''#.*''', '''''', line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return sha256(full_bytes ).hexdigest()
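# Hedged doctest for the helper above (comments are stripped, blank lines are
# dropped, trailing whitespace left by stripped comments is kept):
#     >>> _hash_python_lines(["x = 1  # set x", "", "y = 2"]) == sha256(b"x = 1  \ny = 2").hexdigest()
#     True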
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''') | 153 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''
    def get_tokenizer( self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 153 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path( model_type ,use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR ,REMOTE_MODEL_PATHS[key]["file_name"] )
def _download( from_hf_path ,file_name ):
    os.makedirs(CACHE_DIR ,exist_ok=True )
    hf_hub_download(repo_id=from_hf_path ,filename=file_name ,local_dir=CACHE_DIR )
def _load_model( ckpt_path ,device ,use_small=False ,model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
lowerCamelCase : int = f'''{model_type}_small''' if use_small else model_type
lowerCamelCase : str = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["repo_id"] ,model_info["file_name"] )
lowerCamelCase : Tuple = torch.load(_SCREAMING_SNAKE_CASE ,map_location=_SCREAMING_SNAKE_CASE )
# this is a hack
lowerCamelCase : List[Any] = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
lowerCamelCase : Optional[int] = model_args["vocab_size"]
lowerCamelCase : Dict = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCamelCase : Union[str, Any] = model_args.pop("n_head" )
lowerCamelCase : List[Any] = model_args.pop("n_embd" )
lowerCamelCase : List[Any] = model_args.pop("n_layer" )
lowerCamelCase : int = ConfigClass(**checkpoint["model_args"] )
lowerCamelCase : Optional[Any] = ModelClass(config=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = GenerationConfigClass()
lowerCamelCase : Dict = model_generation_config
lowerCamelCase : Optional[Any] = checkpoint["model"]
# fixup checkpoint
lowerCamelCase : List[str] = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(_SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
lowerCamelCase : Union[str, Any] = k[len(_SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
lowerCamelCase : List[Any] = new_k.replace(_SCREAMING_SNAKE_CASE ,new_layer_name_dict[old_layer_name] )
lowerCamelCase : int = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCamelCase : Dict = {k for k in extra_keys if not k.endswith(".attn.bias" )}
lowerCamelCase : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCamelCase : Tuple = {k for k in missing_keys if not k.endswith(".attn.bias" )}
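    # After stripping the torch.compile ``_orig_mod.`` prefix and applying the
    # GPT->Bark layer-name map above, the only tolerated mismatches are the
    # non-persistent causal-mask buffers (``*.attn.bias``); anything else
    # aborts the conversion below.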
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(_SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(_SCREAMING_SNAKE_CASE ,strict=_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = checkpoint["best_val_loss"].item()
logger.info(f'''model loaded: {round(n_params/1e6 ,1 )}M params, {round(_SCREAMING_SNAKE_CASE ,3 )} loss''' )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE="text" ) -> Optional[int]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCamelCase : Optional[Any] = "cpu" # do conversion on cpu
lowerCamelCase : Tuple = _get_ckpt_path(_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = _load_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
# load bark initial model
lowerCamelCase : Optional[int] = _bark_load_model(_SCREAMING_SNAKE_CASE ,"cpu" ,model_type=_SCREAMING_SNAKE_CASE ,use_small=_SCREAMING_SNAKE_CASE )
if model_type == "text":
lowerCamelCase : int = bark_model["model"]
if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
lowerCamelCase : Optional[int] = 5
lowerCamelCase : int = 10
if model_type in ["text", "coarse"]:
lowerCamelCase : Union[str, Any] = torch.randint(256 ,(batch_size, sequence_length) ,dtype=torch.int )
lowerCamelCase : Tuple = bark_model(_SCREAMING_SNAKE_CASE )[0]
lowerCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
# take last logits
lowerCamelCase : str = output_new_model_total.logits[:, [-1], :]
else:
lowerCamelCase : str = 3
lowerCamelCase : Union[str, Any] = 8
lowerCamelCase : Optional[int] = torch.randint(256 ,(batch_size, sequence_length, n_codes_total) ,dtype=torch.int )
lowerCamelCase : int = model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = bark_model(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = output_new_model_total.logits
    # any difference between the two outputs should stem solely from the differing self-attention implementations
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[str]:
lowerCamelCase : Dict = os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Optional[int] = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Tuple = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE ,"config.json" ) )
lowerCamelCase : Tuple = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase : str = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = EncodecModel.from_pretrained("facebook/encodec_24khz" )
lowerCamelCase : Union[str, Any] = BarkConfig.from_sub_model_configs(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config ,coarseAcoustic.generation_config ,fineAcoustic.generation_config )
lowerCamelCase : List[str] = BarkModel(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = semantic
lowerCamelCase : Optional[Any] = coarseAcoustic
lowerCamelCase : Union[str, Any] = fineAcoustic
lowerCamelCase : str = codec
lowerCamelCase : Tuple = bark_generation_config
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
bark.save_pretrained(_SCREAMING_SNAKE_CASE ,repo_id=_SCREAMING_SNAKE_CASE ,push_to_hub=_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
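    # Example invocation (the script name is illustrative):
    #   python convert_suno_to_hf.py text ./bark-text-hf --is_small
    # downloads suno/bark's small text checkpoint and writes the converted
    # BarkSemanticModel weights to ./bark-text-hf.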
| 48 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = ObjectDetectionPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase)
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : str = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0)
self.assertGreater(len(_UpperCAmelCase) , 0)
for detected_object in outputs:
self.assertEqual(
_UpperCAmelCase , {
'score': ANY(_UpperCAmelCase),
'label': ANY(_UpperCAmelCase),
'box': {'xmin': ANY(_UpperCAmelCase), 'ymin': ANY(_UpperCAmelCase), 'xmax': ANY(_UpperCAmelCase), 'ymax': ANY(_UpperCAmelCase)},
} , )
import datasets
__A : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test')
__A : List[str] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__A : Union[str, Any] = object_detector(_UpperCAmelCase , threshold=0.0)
self.assertEqual(len(_UpperCAmelCase) , len(_UpperCAmelCase))
for outputs in batch_outputs:
self.assertGreater(len(_UpperCAmelCase) , 0)
for detected_object in outputs:
self.assertEqual(
_UpperCAmelCase , {
'score': ANY(_UpperCAmelCase),
'label': ANY(_UpperCAmelCase),
'box': {'xmin': ANY(_UpperCAmelCase), 'ymin': ANY(_UpperCAmelCase), 'xmax': ANY(_UpperCAmelCase), 'ymax': ANY(_UpperCAmelCase)},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__A : Any = AutoModelForObjectDetection.from_pretrained(_UpperCAmelCase)
__A : int = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
__A : Any = ObjectDetectionPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase)
__A : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
__A : Optional[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = 'facebook/detr-resnet-50'
__A : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(_UpperCAmelCase)
__A : Tuple = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
__A : List[Any] = ObjectDetectionPipeline(model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase)
__A : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 'facebook/detr-resnet-50'
__A : str = pipeline('object-detection' , model=_UpperCAmelCase)
__A : Any = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
__A : str = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = 0.9985
__A : List[Any] = 'facebook/detr-resnet-50'
__A : List[str] = pipeline('object-detection' , model=_UpperCAmelCase)
__A : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_UpperCAmelCase)
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
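        # With the threshold (0.9985) just below the two cat scores (0.9988 and
        # 0.9987), only those detections survive; the remotes and couch drop out.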
@require_torch
@require_pytesseract
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = 'Narsil/layoutlmv3-finetuned-funsd'
__A : Tuple = 0.9993
__A : str = pipeline('object-detection' , model=_UpperCAmelCase , threshold=_UpperCAmelCase)
__A : Optional[int] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , ) | 190 | 0 |
def different_signs(numa: int, numb: int) -> bool:
    """Return True when the two integers have opposite signs: the XOR of two
    ints has its sign bit set exactly when the operands' sign bits differ."""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
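    # Spot checks of the sign-bit trick (illustrative values):
    assert different_signs(-5, 3)
    assert not different_signs(4, 7)
    assert not different_signs(-4, -7)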
| 110 |
import d4rl  # noqa  # registers the D4RL offline-RL environments (e.g. hopper-medium-v2) with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
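    # ``rollout`` now holds one observation per step; a typical follow-up
    # (numpy import assumed) is to persist it for offline rendering:
    #   np.save("hopper_rollout.npy", np.stack(rollout))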
| 110 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = StableDiffusionXLImgaImgPipeline
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {"""latents"""}
_SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE_ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
UpperCamelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
UpperCamelCase = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def A ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any]=0 ):
"""simple docstring"""
UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCamelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A ( self : Union[str, Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def A ( self : Dict ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A ( self : List[Any] ):
"""simple docstring"""
pass
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
# forward without prompt embeds
UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = 3 * ['''this is a negative prompt''']
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs['''prompt''']]
UpperCamelCase = sd_pipe(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = 3 * ['''this is a negative prompt''']
UpperCamelCase = 3 * [inputs.pop('prompt' )]
(
UpperCamelCase
) = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = sd_pipe(
**SCREAMING_SNAKE_CASE_ , prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
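        # Feeding the precomputed prompt embeddings back in must reproduce the
        # plain-text path; the slice comparison above enforces agreement to 1e-4.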
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]="cpu" , UpperCamelCase__ : Tuple=torch.floataa , UpperCamelCase__ : Union[str, Any]=0 ):
"""simple docstring"""
UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 4, 6_4, 6_4) )
UpperCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = self.get_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 28 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
UpperCamelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _snake_case ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
UpperCamelCase_ : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
UpperCamelCase_ : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
UpperCamelCase_ : bool = field(
default=UpperCAmelCase_ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
if self.train_file is not None:
A: Tuple = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
A: str = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, 'r', encoding='utf-8') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A: List[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
A: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowercase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
A: int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
A: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
A: Any = {}
if data_args.train_file is not None:
A: int = data_args.train_file
if data_args.validation_file is not None:
A: Optional[int] = data_args.validation_file
A: List[str] = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
A: int = '''text'''
A: Any = load_dataset(__lowercase , data_files=__lowercase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A: Dict = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
elif model_args.model_name_or_path:
A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
A: str = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
A: Tuple = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
elif model_args.model_name_or_path:
A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
A: List[Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
A: int = datasets['''train'''].column_names
else:
A: str = datasets['''validation'''].column_names
A: Tuple = '''text''' if '''text''' in column_names else column_names[0]
    padding = '''max_length''' if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples['''text'''] = [line for line in examples['''text'''] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
A: str = datasets.map(
__lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
A: Dict = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
A: List[Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
A: Optional[int] = Trainer(
model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
A: Optional[int] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
A: str = model_args.model_name_or_path
else:
A: List[str] = None
A: str = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
A: Optional[int] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A: Optional[Any] = trainer.evaluate()
A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
A: Dict = perplexity
A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowercase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
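    # Example launch (file names are illustrative):
    #   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
    #       --train_file train.txt --train_ref_file train_ref.json \
    #       --do_train --do_eval --output_dir ./out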
| 319 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """Wraps a WhisperFeatureExtractor and a WhisperTokenizer into a single processor."""

    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('''audio''', None)
        sampling_rate = kwargs.pop('''sampling_rate''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
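# A minimal usage sketch (checkpoint id and the ``waveform``/``generated_ids``
# variables are illustrative placeholders):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)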
| 357 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad (flat magnitude, phase shift only)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking-EQ biquad with ``gain_db`` of boost or cut."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
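# A short usage sketch; ``IIRFilter.process(sample)`` from
# audio_filters.iir_filter is assumed to apply the biquad one sample at a time:
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    print([round(lowpass.process(s), 5) for s in (0.0, 1.0, 0.5, -0.25)])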
| 68 | 0 |
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test using 5 random bases."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
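# Each round with a random base catches a composite with probability at least
# 3/4, so 5 rounds bound the false-positive rate by 4**-5 (under 0.1%).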
def is_prime_low_num(num: int) -> bool:
    """Cheap pre-check against a table of small primes before running Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime of roughly ``keysize`` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 75 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
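    # Typical entry point once the optional deps are present (model id and
    # arguments follow the usual diffusers example; treat as illustrative):
    #   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
    #   images = pipe("a shark", guidance_scale=15.0).images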
| 304 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = tempfile.mkdtemp()
__a : List[str] = SamImageProcessor()
__a : int = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : List[Any] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.get_image_processor()
__a : Tuple = SamProcessor(image_processor=__a )
__a : Tuple = self.prepare_image_inputs()
__a : Union[str, Any] = image_processor(__a , return_tensors='np' )
__a : Tuple = processor(images=__a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.get_image_processor()
__a : List[str] = SamProcessor(image_processor=__a )
__a : Tuple = [torch.ones((1, 3, 5, 5) )]
__a : int = [[1764, 2646]]
__a : Optional[int] = [[683, 1024]]
__a : Union[str, Any] = processor.post_process_masks(__a , __a , __a )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : int = processor.post_process_masks(
__a , torch.tensor(__a ) , torch.tensor(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__a : Union[str, Any] = [np.ones((1, 3, 5, 5) )]
__a : Optional[Any] = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__a : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(__a ):
__a : Optional[int] = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = tempfile.mkdtemp()
__a : Union[str, Any] = SamImageProcessor()
__a : str = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def __UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : List[str] = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__a : Dict = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.get_image_processor()
__a : str = SamProcessor(image_processor=__a )
__a : List[Any] = self.prepare_image_inputs()
__a : Optional[Any] = image_processor(__a , return_tensors='np' )
__a : int = processor(images=__a , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
    def test_post_process_masks(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf')
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        masks = processor.post_process_masks(
            dummy_masks, tf.convert_to_tensor(original_sizes), tf.convert_to_tensor(reshaped_input_size), return_tensors='tf',)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf')
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf')
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_masks, original_sizes, reshaped_input_size, return_tensors='tf')
        pt_masks = processor.post_process_masks(
            pt_masks, original_sizes, reshaped_input_size, return_tensors='pt')
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
@is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors='pt')['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='pt')['pixel_values'].numpy()
        tf_input_feat_extract = image_processor(image_input, return_tensors='tf')['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='tf')['pixel_values'].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
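

# A minimal usage sketch of the processor these tests exercise. The
# "facebook/sam-vit-base" checkpoint name is an assumption for illustration;
# any SAM checkpoint with a saved SamProcessor works the same way.
def sam_processor_demo():
    from PIL import Image as PILImage
    from transformers import SamProcessor as _SamProcessor

    processor = _SamProcessor.from_pretrained("facebook/sam-vit-base")
    image = PILImage.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
    inputs = processor(images=image, return_tensors="pt")
    # pixel_values are resized/padded to the model's fixed input resolution;
    # original_sizes and reshaped_input_sizes are exactly what
    # post_process_masks needs to map low-res masks back onto the image.
    return inputs["pixel_values"].shape, inputs["original_sizes"], inputs["reshaped_input_sizes"]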
| 294 |
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    total_score = 0
    name_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
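
# A quick check of the scoring rule, using the worked example from the
# original Project Euler problem 22 statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in the sorted list,
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(letter) - 64 for letter in "COLIN") == 53
assert 938 * 53 == 49714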
| 294 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class A_ ( DiffusionPipeline ):
'''simple docstring'''
    _optional_components = ["melgan"]
    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: Any) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,)
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
@torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable] = None, callback_steps: int = 1, ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(callback_steps)}.''' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype, )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps, )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info('''Generated segment''', i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
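

# A standalone sketch of the min-max rescaling that scale_features /
# scale_to_features implement above: map [min_value, max_value] linearly
# onto an output range, then invert it. The constants mirror the ones set
# in __init__; the round trip is an exact inverse up to float rounding.
def _rescale_round_trip_demo():
    min_value, max_value = math.log(1E-5), 4.0
    features = torch.tensor([min_value, 0.0, max_value])
    zero_one = (features - min_value) / (max_value - min_value)   # -> [0, 1]
    scaled = zero_one * 2.0 - 1.0                                 # -> [-1, 1]
    restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value
    assert torch.allclose(restored, features)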
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(4_2)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=8_0)
    dataloader = DataLoader(dset, batch_size=1_6)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''simple docstring'''
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''simple docstring'''
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_3_3_7 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=8_0)
    dataloader = DataLoader(first_dset, batch_size=1_6)
    second_dset = RegressionDataset(length=9_6)
    second_dataloader = DataLoader(second_dset, batch_size=1_6)
    dataloader, second_dataloader = accelerator.prepare(dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(dataloader)
        if iteration < len(dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
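

# A minimal sketch of the accumulate() pattern exercised above, reusing the
# same accelerate test helpers already imported in this file. Assumption:
# RegressionDataset yields batches with "x" and "y" keys, which matches how
# the tests above unpack batch.values().
def minimal_accumulate_example():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(params=model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            # gradients only sync (and the step only takes effect) every
            # gradient_accumulation_steps batches
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
    GradientState._reset_state()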
| 333 | 1 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
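

# Worked example: at 100 VA apparent power and power factor 0.8, the real
# power is 100 * 0.8 = 80 W and the reactive power is
# 100 * sqrt(1 - 0.8**2) = 60 VAR, so the power triangle closes:
# 80**2 + 60**2 == 100**2.
assert abs(real_power(100, 0.8) - 80.0) < 1e-9
assert abs(reactive_power(100, 0.8) - 60.0) < 1e-9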
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
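

# The decorators above come from a local `utils` module that is not part of
# this file. A plausible minimal implementation of get_duration (an
# assumption for illustration, not the actual helper) just times the call
# and returns the elapsed seconds:
import functools
import timeit


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    return wrapper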
| 42 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_vit"""] = ["""ViTFeatureExtractor"""]
    _import_structure["""image_processing_vit"""] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
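

# A minimal sketch of the lazy-import idea used above (an assumption about
# the core mechanism only; the real _LazyModule also handles __dir__,
# pickling and failed imports): attribute access on the module object
# triggers the actual submodule import on first use.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)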
| 85 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = """last"""
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertModel(config=config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"""input_ids""": input_ids, """lengths""": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        '''simple docstring'''
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
            ], dtype=tf.float32, )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 20 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 125 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '''0.12'''  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token, repo_id="""test-model-flax""")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="""valid_org/test-model-flax-org""")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("""test-model-flax""", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')
        # Reset repo
        delete_repo(token=self._token, repo_id="""test-model-flax""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="""test-model-flax""", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax')
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')
    def test_push_to_hub_in_organization(self):
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        model = FlaxBertModel(config)
        model.push_to_hub("""valid_org/test-model-flax-org""", use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')
        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-model-flax-org""")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="""valid_org/test-model-flax-org""", push_to_hub=True, use_auth_token=self._token)
        new_model = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""")
        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1E-3, msg=F'{key} not identical')
def check_models_equal(model_1, model_2):
    """simple docstring"""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        '''simple docstring'''
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""")
        model = FlaxBertModel(config)
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        '''simple docstring'''
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""")
        model = FlaxBertModel(config)
        subfolder = """bert"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="""10KB""")
            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)
        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        '''simple docstring'''
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-subfolder"""
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        '''simple docstring'''
        subfolder = """bert"""
        model_id = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)
        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 125 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''')
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''')
        model_id = '''xvjiarui/stable-diffusion-2-inpainting'''
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 5_0
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 5_1_2, 5_1_2, 3)
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
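

# The replicate/shard/pmap pattern the test above relies on, in a minimal
# self-contained sketch: parameters gain a leading device axis, the batch is
# split along its leading axis, and pmap runs one slice per device.
def pmap_sharding_demo():
    params = {"w": jnp.ones((3,))}
    replicated = replicate(params)                        # (devices, ...) pytree
    batch = jnp.arange(float(jax.device_count() * 3)).reshape(-1, 3)
    sharded = shard(batch)                                # (devices, per_device, 3)
    return jax.pmap(lambda p, x: x @ p["w"])(replicated, sharded)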
| 103 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""")) / """000000039769.png""").resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("""text""")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("""image""")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("""audio""")
        else:
            raise ValueError(F'''Invalid output: {output}''')
    return output_types
@is_tool_test
class ToolTesterMixin:
'''simple docstring'''
    def test_inputs_outputs(self):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool, """inputs"""))
        self.assertTrue(hasattr(self.tool, """outputs"""))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool, """description"""))
        self.assertTrue(hasattr(self.tool, """default_checkpoint"""))
        self.assertTrue(self.tool.description.startswith("""This is a tool that"""))

    def test_agent_types_outputs(self):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 132 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a lower and an
    upper triangular matrix such that lower @ upper == table."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
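
# Illustrative check of the function above (values verified by hand):
# >>> matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
# >>> lower, upper = lower_upper_decomposition(matrix)
# >>> bool(np.allclose(lower @ upper, matrix))
# True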
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 274 | 1 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
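
# Illustrative check of solution() above: 4150 is one of the numbers counted,
# since 4**5 + 1**5 + 5**5 + 0**5 == 1024 + 1 + 3125 + 0 == 4150.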
| 270 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Return True if the given number is prime, False otherwise."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
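
# Worked illustration of the 6k +/- 1 loop above: for number = 25 the loop
# starts at i = 5 and 25 % 5 == 0, so 25 is correctly rejected; a prime such
# as 29 (= 6*5 - 1) survives every trial division up to sqrt(29).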
class Test(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 161 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
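
# Illustrative walk-through of the mapping above (key name is hypothetical):
# a fairseq weight named "encoder.layers.3.self_attn.k_proj.weight" matches
# the "self_attn.k_proj" entry of MAPPING, "3" replaces the "*" wildcard, and
# the tensor lands in "unispeech_sat.encoder.layers.3.attention.k_proj.weight".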
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""  # dict_path is overridden here before being used below

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 351 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)
def A ( self : List[str] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : str ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Optional[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : List[str] , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : Tuple , lowercase : Tuple ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main() | 130 | 0 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
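
# Illustrative examples: "CONSTANT_NAME" (all upper case) maps to
# DUMMY_CONSTANT, "function_name" (all lower case) to DUMMY_FUNCTION, and a
# mixed-case name such as "UNet2DModel" to DUMMY_CLASS.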
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 153 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 153 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def __a ( _UpperCamelCase: Any , _UpperCamelCase: Union[str, Any] , _UpperCamelCase: Tuple , _UpperCamelCase: Optional[Any]=None , _UpperCamelCase: Any=None , _UpperCamelCase: Any=None ) -> Optional[int]:
"""simple docstring"""
assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_snake_case = old_checkpoint[path]
_snake_case = old_tensor.shape[0] // 3
_snake_case = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_snake_case = old_tensor.shape[0] // config["""num_head_channels"""] // 3
_snake_case = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_snake_case = old_tensor.split(channels // num_heads , dim=1 )
_snake_case = query.reshape(snake_case_ )
_snake_case = key.reshape(snake_case_ )
_snake_case = value.reshape(snake_case_ )
for path in paths:
_snake_case = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_snake_case = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
_snake_case = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
_snake_case = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
_snake_case = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_snake_case = old_checkpoint[path["""old"""]][:, :, 0]
else:
_snake_case = old_checkpoint[path["""old"""]]
def __a ( _UpperCamelCase: List[Any] , _UpperCamelCase: Dict ) -> List[str]:
"""simple docstring"""
_snake_case = {}
_snake_case = checkpoint["""time_embed.0.weight"""]
_snake_case = checkpoint["""time_embed.0.bias"""]
_snake_case = checkpoint["""time_embed.2.weight"""]
_snake_case = checkpoint["""time_embed.2.bias"""]
_snake_case = checkpoint["""input_blocks.0.0.weight"""]
_snake_case = checkpoint["""input_blocks.0.0.bias"""]
_snake_case = checkpoint["""out.0.weight"""]
_snake_case = checkpoint["""out.0.bias"""]
_snake_case = checkpoint["""out.2.weight"""]
_snake_case = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_snake_case = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
_snake_case = {
layer_id: [key for key in checkpoint if F"""input_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the middle blocks only
_snake_case = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
_snake_case = {
layer_id: [key for key in checkpoint if F"""middle_block.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the output blocks only
_snake_case = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
_snake_case = {
layer_id: [key for key in checkpoint if F"""output_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
for i in range(1 , snake_case_ ):
_snake_case = (i - 1) // (config["""num_res_blocks"""] + 1)
_snake_case = (i - 1) % (config["""num_res_blocks"""] + 1)
_snake_case = [key for key in input_blocks[i] if F"""input_blocks.{i}.0""" in key]
_snake_case = [key for key in input_blocks[i] if F"""input_blocks.{i}.1""" in key]
if F"""input_blocks.{i}.0.op.weight""" in checkpoint:
_snake_case = checkpoint[
F"""input_blocks.{i}.0.op.weight"""
]
_snake_case = checkpoint[
F"""input_blocks.{i}.0.op.bias"""
]
continue
_snake_case = renew_resnet_paths(snake_case_ )
_snake_case = {"""old""": F"""input_blocks.{i}.0""", """new""": F"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_snake_case = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
if len(snake_case_ ):
_snake_case = renew_attention_paths(snake_case_ )
_snake_case = {
"""old""": F"""input_blocks.{i}.1""",
"""new""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_snake_case = {
F"""input_blocks.{i}.1.qkv.bias""": {
"""key""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""input_blocks.{i}.1.qkv.weight""": {
"""key""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": F"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
_snake_case = middle_blocks[0]
_snake_case = middle_blocks[1]
_snake_case = middle_blocks[2]
_snake_case = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
_snake_case = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
_snake_case = renew_attention_paths(snake_case_ )
_snake_case = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
for i in range(snake_case_ ):
_snake_case = i // (config["""num_res_blocks"""] + 1)
_snake_case = i % (config["""num_res_blocks"""] + 1)
_snake_case = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
_snake_case = {}
for layer in output_block_layers:
_snake_case = layer.split("." )[0], shave_segments(snake_case_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case_ )
else:
_snake_case = [layer_name]
if len(snake_case_ ) > 1:
_snake_case = [key for key in output_blocks[i] if F"""output_blocks.{i}.0""" in key]
_snake_case = [key for key in output_blocks[i] if F"""output_blocks.{i}.1""" in key]
_snake_case = renew_resnet_paths(snake_case_ )
_snake_case = renew_resnet_paths(snake_case_ )
_snake_case = {"""old""": F"""output_blocks.{i}.0""", """new""": F"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_snake_case = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
_snake_case = checkpoint[
F"""output_blocks.{i}.{index}.conv.weight"""
]
_snake_case = checkpoint[
F"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(snake_case_ ) == 2:
_snake_case = []
if len(snake_case_ ):
_snake_case = renew_attention_paths(snake_case_ )
_snake_case = {
"""old""": F"""output_blocks.{i}.1""",
"""new""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_snake_case = {
F"""output_blocks.{i}.1.qkv.bias""": {
"""key""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
F"""output_blocks.{i}.1.qkv.weight""": {
"""key""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": F"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=snake_case_ , )
else:
_snake_case = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_snake_case = """.""".join(["output_blocks", str(snake_case_ ), path["old"]] )
_snake_case = """.""".join(["up_blocks", str(snake_case_ ), "resnets", str(snake_case_ ), path["new"]] )
_snake_case = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))

        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 355 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under fnc between x_start and x_end using the
    trapezoidal rule with the given number of steps."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 142 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n    >>> pipe_prior.to("cuda")\n    >>> prompt = "red cat, 4k photo"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> zero_image_emb = out.negative_image_embeds\n    >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n    >>> pipe.to("cuda")\n    >>> image = pipe(\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=50,\n    ... ).images\n    >>> image[0].save("cat.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
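
# Illustrative examples with the default scale_factor of 8: a 768x768 request
# maps to 96x96 (768 // 64 == 12, 12 * 8 == 96), while 770x770 rounds up to
# 104x104.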
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
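
    # Illustrative note on the scaling above: DDPMScheduler has
    # init_noise_sigma == 1.0, so the multiplication is a no-op there; it
    # matters for schedulers whose initial sigma differs from 1.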
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 110 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
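
# Illustrative example of the function above: a config docstring containing
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)" yields the
# checkpoint name "bert-base-uncased", because the link equals
# f"https://huggingface.co/{ckpt_name}".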
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(
            f"The following configurations don't contain any valid checkpoint:\n{message}"
        )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 110 | 1 |
'''simple docstring'''
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Return the minimum cost of a path from the top-left to the bottom-right
    cell of the matrix, moving only right or down (in-place DP)."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
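
# Illustrative check (values verified by hand): for the grid
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest top-left to bottom-right path
# costs 1 + 3 + 1 + 1 + 1 == 7, which is what minimum_cost_path returns.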
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # no fast tokenizer to import for RoCBert
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294 | 0 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
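
# Illustrative sanity check: softmax rows sum to 1 and the max-subtraction
# makes it shift-invariant, e.g. softmax(np.array([[0.0, 1.0]])) is
# approximately [[0.2689, 0.7311]].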
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 67 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowerCAmelCase__ = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowerCAmelCase__ = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 68 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)  # assumption: restoring the garbled dtype as float32
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # feeds an nn.Sequential with Tanh, so two entries at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow stores kernels as (in, out); PyTorch wants (out, in)
                new_state[name] = torch.tensor(state)
elif key_name.startswith("model/moe" ):
UpperCAmelCase_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/softmlp/kernel" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
UpperCAmelCase_ = key_name[-9:-7]
for i in range(16 ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
UpperCAmelCase_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.startswith("model/mlp" ):
UpperCAmelCase_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/p1/bias" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/p2/kernel" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/p2/bias" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.startswith("model/ln" ):
UpperCAmelCase_ = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.norm.bias" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/g" ):
UpperCAmelCase_ = "model.blocks.%d.feed_forward.norm.weight" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.startswith("model/att" ):
UpperCAmelCase_ = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
UpperCAmelCase_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
UpperCAmelCase_ = state[:, 0, :, :]
UpperCAmelCase_ = state[:, 1, :, :]
UpperCAmelCase_ = state[:, 2, :, :]
UpperCAmelCase_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
UpperCAmelCase_ = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
UpperCAmelCase_ = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/o/kernel" ):
UpperCAmelCase_ = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
UpperCAmelCase_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.startswith("model/an" ):
UpperCAmelCase_ = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
UpperCAmelCase_ = "model.blocks.%d.self_attn.norm.bias" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.endswith("/g" ):
UpperCAmelCase_ = "model.blocks.%d.self_attn.norm.weight" % player
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
UpperCAmelCase_ = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
UpperCAmelCase_ = "model.%s.weight" % nlayer
UpperCAmelCase_ = vnp.copy() # same in embedded
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
if key_name.startswith("model/wte" ):
UpperCAmelCase_ = "lm_head.weight"
UpperCAmelCase_ = vnp.copy() # same in embedded
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name.startswith("model/wob" ):
UpperCAmelCase_ = "final_logits_bias"
UpperCAmelCase_ = vnp.copy() # same in embedded
UpperCAmelCase_ = state.reshape((1, -1) )
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name == "model/dense/kernel":
UpperCAmelCase_ = "model.last_project.weight"
UpperCAmelCase_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
elif key_name == "model/dense_1/bias":
UpperCAmelCase_ = "model.last_project.bias"
UpperCAmelCase_ = vnp.copy() # same because it is one dimensional
UpperCAmelCase_ = torch.tensor(lowerCAmelCase__ )
torch.save(lowerCAmelCase__ , args.output )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
lowerCamelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
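# Illustrative helper (an assumption, not part of the original converter): a quick way
# to sanity-check the saved state dict's parameter names, shapes and float16 dtype.
def inspect_converted_checkpoint(path):
    state_dict = torch.load(path, map_location="cpu")
    for name, tensor in list(state_dict.items())[:5]:
        print(name, tuple(tensor.shape), tensor.dtype)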
| 241 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
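# Sketch of the effect of the _LazyModule indirection above (assuming the standard
# transformers package layout): importing the package is cheap, and the torch-backed
# modeling file is only imported on first attribute access, e.g.
#   import transformers.models.mgp_str as mgp_str
#   config = mgp_str.MgpstrConfig()  # triggers the real import of configuration_mgp_str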
| 241 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig(PretrainedConfig):
    model_type = 'encodec'
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type='weight_norm',
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode='reflect',
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ['weight_norm', 'time_group_norm']:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )
        super().__init__(**kwargs)
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)
    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    @property
    def frame_rate(self):
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)
    @property
    def num_quantizers(self):
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
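# Usage sketch (illustrative, not part of the original file): the derived properties
# above follow directly from the constructor defaults.
if __name__ == "__main__":
    _cfg = EncodecConfig(chunk_length_s=1.0, overlap=0.01)
    print(_cfg.chunk_length)    # int(1.0 * 24000) = 24000 samples
    print(_cfg.chunk_stride)    # max(1, int((1.0 - 0.01) * 24000)) = 23760
    print(_cfg.frame_rate)      # ceil(24000 / prod([8, 5, 4, 2])) = 75
    print(_cfg.num_quantizers)  # int(1000 * 24.0 // (75 * 10)) = 32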
| 294 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294 | 1 |
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
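# Worked examples for shear_stress; pass exactly one of the three values as 0 to
# mark it as the unknown quantity:
if __name__ == "__main__":
    print(shear_stress(stress=25, tangential_force=100, area=0))    # ('area', 4.0)
    print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)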
| 110 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 110 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)
    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
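# Hypothetical wiring sketch (actor count is illustrative): the retrieval workers
# handed to from_pretrained are Ray actor handles over RayRetriever.
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
#   retriever.init_retrieval()  # initializes the index inside each actor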
| 108 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
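# Small usage sketch (illustrative): attribute_map makes the legacy names resolve to
# the canonical ones, so both reads below return the same value.
if __name__ == "__main__":
    _cfg = XLNetConfig()
    print(_cfg.d_model, _cfg.hidden_size)  # 1024 1024, via the "hidden_size" alias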
| 42 | 0 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
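    # Numeric illustration of the Metropolis acceptance rule used above: a move that
    # worsens the score by 5 is accepted with probability e^(change / current_temp).
    for temp in (100, 10, 1):
        print(temp, math.e ** (-5 / temp))  # ~0.951, ~0.607, ~0.0067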
| 361 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 97 | 0 |
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str) -> tuple[str, str, str]:
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
    main()
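    # Tiny illustration of RadixNode.match, which splits a word against a node prefix
    # into (common part, leftover prefix, leftover word):
    print(RadixNode(prefix="banana").match("banddd"))  # ('ban', 'ana', 'ddd')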
| 125 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True
    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
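# Quick sanity check of the regression helpers above (values are illustrative; runs
# without `accelerate launch`):
if __name__ == "__main__":
    _ds = RegressionDataset(a=2, b=3, length=4, seed=0)
    _model = RegressionModel(a=2, b=3)
    _pred = _model(torch.tensor(_ds[0]["x"]))
    print(_pred.item(), _ds[0]["y"])  # prediction vs. noisy target: close but not equal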
| 125 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)
    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)
        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)
        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)
        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)
        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 359 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase__ : Any = None
lowercase__ : List[str] = logging.get_logger(__name__)
lowercase__ : Tuple = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : Union[str, Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowercase__ : Any = {
'''google/rembert''': 2_56,
}
SPIECE_UNDERLINE = '''▁'''
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : List[Any]=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[SEP]" , lowercase_ : str="<unk>" , lowercase_ : Tuple="[SEP]" , lowercase_ : Optional[int]="<pad>" , lowercase_ : List[Any]="[CLS]" , lowercase_ : Union[str, Any]="[MASK]" , **lowercase_ : Dict , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
snake_case_ : Optional[int] = do_lower_case
snake_case_ : List[Any] = remove_space
snake_case_ : str = keep_accents
snake_case_ : str = vocab_file
snake_case_ : Optional[int] = False if not self.vocab_file else True
def _snake_case ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ : Optional[int] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def _snake_case ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
snake_case_ : Union[str, Any] = [self.sep_token_id]
snake_case_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowercase_ ) )
return
snake_case_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
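# Added note (sketch; the upstream names for the `_snake_case` methods above are
# build_inputs_with_special_tokens, get_special_tokens_mask,
# create_token_type_ids_from_sequences and save_vocabulary). The first produces
# the BERT-style layouts
#   [CLS] tokens_a [SEP]                  for a single sequence
#   [CLS] tokens_a [SEP] tokens_b [SEP]   for a pair,
# and the third marks the first segment (with its [CLS] and [SEP]) as 0 and the
# second segment (with its trailing [SEP]) as 1.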
| 155 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
'''simple docstring'''
@staticmethod
def a_ ( *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class A (unittest.TestCase ):
'''simple docstring'''
@require_torch
def a_ ( self : str ) -> List[Any]:
"""simple docstring"""
A__ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__lowerCAmelCase ) , [
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}],
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """c"""}, {"""score""": 0.3_3_3, """label""": """b"""}],
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def a_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
A__ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
[
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
{"""score""": 0.3_3_3, """label""": ANY(__lowerCAmelCase )},
],
] , )
@slow
@require_torch
def a_ ( self : Any ) -> Any:
"""simple docstring"""
A__ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def a_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
A__ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A__ = image_classifier(__lowerCAmelCase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
A__ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
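# Added usage sketch (comments only; mirrors the slow tests above, assuming the
# same checkpoint and fixture image are available locally):
# classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# classifier(image, candidate_labels=["cat", "plane", "remote"])
# -> a list of {"score": float, "label": str} dicts, highest score first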
| 274 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A (datasets.Metric ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]:
"""simple docstring"""
        A__ = f1_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase )
return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
| 274 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
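    # Added sanity check (sketch): _modexpt is modular exponentiation by
    # squaring, so it must agree with Python's built-in three-argument pow().
    assert _modexpt(2, 10, 1_000) == pow(2, 10, 1_000) == 24
    assert _modexpt(3, 7, 5) == pow(3, 7, 5)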
| 180 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowerCAmelCase_ ( self , **A ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self , **A ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
a = "永和服装饰品有限公司,今天天气非常好"
a = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.get_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.get_rust_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
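# Added note (sketch): RoFormer's Chinese tokenizers pre-segment text with
# rjieba (hence the @require_rjieba marker above), so the sample sentence is
# split into word-level pieces before vocabulary lookup; tokens missing from
# the vocabulary fall back to the unk token, id 100 in the expected id lists.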
| 180 | 1 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main():
"""simple docstring"""
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
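    # Added sketch: a standard application of gcd is the least common multiple,
    # using lcm(a, b) == a * b // gcd(a, b) for positive integers.
    assert 6 * 4 // euclidean_gcd(6, 4) == 12
    assert 3 * 5 // euclidean_gcd_recursive(3, 5) == 15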
| 64 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def snake_case ( self : str ):
super().setUp()
lowercase__ : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Any = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Tuple ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[int] ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self : List[str] ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def snake_case ( self : Union[str, Any] ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def snake_case ( self : str ):
lowercase__ : List[str] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ : List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : str = tokenizer(SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE ) , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase__ : Optional[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_torch
def snake_case ( self : Optional[int] ):
lowercase__ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Any = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertIn("input_ids" , SCREAMING_SNAKE_CASE )
self.assertIn("attention_mask" , SCREAMING_SNAKE_CASE )
self.assertNotIn("labels" , SCREAMING_SNAKE_CASE )
self.assertNotIn("decoder_attention_mask" , SCREAMING_SNAKE_CASE )
@require_torch
def snake_case ( self : List[str] ):
lowercase__ : List[str] = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Any = tokenizer(text_target=SCREAMING_SNAKE_CASE , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def snake_case ( self : Optional[int] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Tuple = tokenizer(
["I am a small frog" * 1_024, "I am a small frog"] , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def snake_case ( self : Tuple ):
lowercase__ : int = ["A long paragraph for summarization."]
lowercase__ : Union[str, Any] = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Tuple = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowercase__ : List[str] = tokenizer(text_target=SCREAMING_SNAKE_CASE , return_tensors="pt" )
lowercase__ : Union[str, Any] = inputs["input_ids"]
lowercase__ : List[str] = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def snake_case ( self : Dict ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase__ : Optional[int] = ["Summary of the text.", "Another summary."]
lowercase__ : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase__ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = [[0] * len(SCREAMING_SNAKE_CASE ) for x in encoded_output["input_ids"]]
lowercase__ : Optional[int] = tokenizer.pad(SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(outputs["global_attention_mask"] , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
pass
def snake_case ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A, <mask> AllenNLP sentence."
lowercase__ : Any = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 130 | 0 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase ( TestCasePlus ):
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> List[Any]:
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
lowerCamelCase__ : Tuple = {'source': 'What is love ?', 'target': 'life'}
lowerCamelCase__ : str = {'train': 12, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
lowerCamelCase__ : Optional[int] = '\n'.join([contents[field]] * n_lines[split] )
with open(os.path.join(UpperCAmelCase , F"""{split}.{field}""" ) , 'w' ) as f:
f.write(UpperCAmelCase )
def A_ ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : str = "pytorch" ) -> str:
lowerCamelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowerCamelCase__ : int = os.path.join(UpperCAmelCase , 'output' )
lowerCamelCase__ : int = os.path.join(UpperCAmelCase , 'data' )
self._create_dummy_data(data_dir=UpperCAmelCase )
lowerCamelCase__ : Dict = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
lowerCamelCase__ : Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(UpperCAmelCase , env=self.get_env() )
lowerCamelCase__ : Dict = os.path.join(UpperCAmelCase , 'metrics.json' )
with open(UpperCAmelCase ) as f:
lowerCamelCase__ : Dict = json.load(UpperCAmelCase )
return result
@require_torch_gpu
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ : List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def A_ ( self : Any ) -> List[Any]:
lowerCamelCase__ : str = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def A_ ( self : Optional[int] ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ : Tuple = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 45 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = """efficientnet"""
def __init__( self : Tuple , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 600 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.2_5 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2560 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 0.0_0_1 , UpperCAmelCase : float = 0.9_9 , UpperCAmelCase : float = 0.5 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : int , ) -> Any:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : List[Any] = num_channels
lowerCamelCase__ : List[str] = image_size
lowerCamelCase__ : Union[str, Any] = width_coefficient
lowerCamelCase__ : Optional[Any] = depth_coefficient
lowerCamelCase__ : Union[str, Any] = depth_divisor
lowerCamelCase__ : Dict = kernel_sizes
lowerCamelCase__ : Union[str, Any] = in_channels
lowerCamelCase__ : Dict = out_channels
lowerCamelCase__ : Dict = depthwise_padding
lowerCamelCase__ : int = strides
lowerCamelCase__ : List[str] = num_block_repeats
lowerCamelCase__ : Optional[Any] = expand_ratios
lowerCamelCase__ : List[str] = squeeze_expansion_ratio
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : int = hidden_dim
lowerCamelCase__ : int = pooling_type
lowerCamelCase__ : Optional[Any] = initializer_range
lowerCamelCase__ : Any = batch_norm_eps
lowerCamelCase__ : List[Any] = batch_norm_momentum
lowerCamelCase__ : int = dropout_rate
lowerCamelCase__ : int = drop_connect_rate
lowerCamelCase__ : List[Any] = sum(UpperCAmelCase ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def A_ ( self : List[Any] ) -> float:
return 1e-5
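# Added usage sketch (comments only; the defaults above correspond to the
# EfficientNet-B7 checkpoint referenced in the archive map):
# config = EfficientNetConfig()
# config.image_size, config.hidden_dim         # (600, 2560)
# onnx_config = EfficientNetOnnxConfig(config)
# list(onnx_config.inputs)                     # ['pixel_values']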
| 45 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml
logger = logging.get_logger(__name__)
def lowerCamelCase__ ( snake_case_ : bool , snake_case_ : bool ) -> Optional[Any]:
def run_func(snake_case_ : Union[str, Any] ):
@wraps(snake_case_ )
def run_in_eager_mode(*snake_case_ : str , **snake_case_ : Any ):
return func(*snake_case_ , **snake_case_ )
@wraps(snake_case_ )
@tf.function(experimental_compile=snake_case_ )
def run_in_graph_mode(*snake_case_ : List[str] , **snake_case_ : Any ):
return func(*snake_case_ , **snake_case_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> ["tf.Tensor"]:
__snake_case = random.Random()
__snake_case = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(snake_case_ , shape=(batch_size, sequence_length) , dtype=tf.int32 )
class TensorFlowBenchmark( Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def a (self : str ):
"""simple docstring"""
return tf.__version__
def a (self : Optional[int] , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__snake_case = self._prepare_inference_func(a__ , a__ , a__ )
return self._measure_speed(_inference )
def a (self : Dict , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__snake_case = self._prepare_train_func(a__ , a__ , a__ )
return self._measure_speed(_train )
def a (self : List[str] , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a__ )
__snake_case = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__snake_case = self._prepare_inference_func(a__ , a__ , a__ )
return self._measure_memory(_inference )
def a (self : Tuple , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a__ )
__snake_case = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
__snake_case = self._prepare_train_func(a__ , a__ , a__ )
return self._measure_memory(_train )
def a (self : Union[str, Any] , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
__snake_case = (
hasattr(a__ , '''architectures''' )
and isinstance(config.architectures , a__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case = __import__('''transformers''' , fromlist=[model_class] )
__snake_case = getattr(a__ , a__ )
__snake_case = model_cls(a__ )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
__snake_case = TF_MODEL_MAPPING[config.__class__](a__ )
# encoder-decoder has vocab size saved differently
__snake_case = config.vocab_size if hasattr(a__ , '''vocab_size''' ) else config.encoder.vocab_size
__snake_case = random_input_ids(a__ , a__ , a__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(a__ , decoder_input_ids=a__ , training=a__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(a__ , training=a__ )
__snake_case = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def a (self : Union[str, Any] , a__ : str , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
__snake_case = (
hasattr(a__ , '''architectures''' )
and isinstance(config.architectures , a__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__snake_case = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
__snake_case = __import__('''transformers''' , fromlist=[model_class] )
__snake_case = getattr(a__ , a__ )
__snake_case = model_cls(a__ )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
__snake_case = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a__ )
# encoder-decoder has vocab size saved differently
__snake_case = config.vocab_size if hasattr(a__ , '''vocab_size''' ) else config.encoder.vocab_size
__snake_case = random_input_ids(a__ , a__ , a__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
__snake_case = model(a__ , decoder_input_ids=a__ , labels=a__ , training=a__ )[0]
__snake_case = tf.gradients(a__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
__snake_case = model(a__ , labels=a__ , training=a__ )[0]
__snake_case = tf.gradients(a__ , model.trainable_variables )
return gradients
__snake_case = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def a (self : List[Any] , a__ : Dict ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(a__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__snake_case = timeit.repeat(
a__ , repeat=self.args.repeat , number=10 , )
return min(a__ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def a (self : Dict , a__ : Callable[[], None] ):
"""simple docstring"""
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
__snake_case = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
                if not is_py3nvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
__snake_case = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
__snake_case = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__snake_case = nvml.nvmlDeviceGetMemoryInfo(a__ )
__snake_case = meminfo.used
__snake_case = Memory(a__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
__snake_case = None
else:
__snake_case = measure_peak_memory_cpu(a__ )
__snake_case = Memory(a__ ) if isinstance(a__ , a__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
__snake_case = stop_memory_tracing(a__ )
if memory is None:
__snake_case = summary.total
else:
__snake_case = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 24 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_00, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 142 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
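# Added note (sketch): the _LazyModule registered above is the standard
# transformers lazy-import pattern -- the submodules listed in
# _import_structure are only imported when one of their attributes is first
# accessed, so `import transformers` stays cheap even when the torch/tf/flax
# backends are installed but unused.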
| 242 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
'''simple docstring'''
_UpperCAmelCase : str =nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase : Tuple =[]
for i in range(len(self.block_out_channels) - 1):
_UpperCAmelCase : Optional[int] =self.block_out_channels[i]
_UpperCAmelCase : List[Any] =self.block_out_channels[i + 1]
_UpperCAmelCase : Tuple =nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case)
_UpperCAmelCase : Optional[int] =nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case)
_UpperCAmelCase : Dict =blocks
_UpperCAmelCase : Tuple =nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : int =self.conv_in(snake_case)
_UpperCAmelCase : Any =nn.silu(snake_case)
for block in self.blocks:
_UpperCAmelCase : Optional[Any] =block(snake_case)
_UpperCAmelCase : Union[str, Any] =nn.silu(snake_case)
_UpperCAmelCase : str =self.conv_out(snake_case)
return embedding
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    def init_weights( self , snake_case ) -> FrozenDict:
'''simple docstring'''
# init input tensors
_UpperCAmelCase : Any =(1, self.in_channels, self.sample_size, self.sample_size)
        _UpperCAmelCase : Optional[Any] =jnp.zeros(snake_case , dtype=jnp.float32)
        _UpperCAmelCase : Optional[int] =jnp.ones((1,) , dtype=jnp.int32)
        _UpperCAmelCase : str =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32)
        _UpperCAmelCase : Optional[Any] =(1, 3, self.sample_size * 8, self.sample_size * 8)
        _UpperCAmelCase : int =jnp.zeros(snake_case , dtype=jnp.float32)
_UpperCAmelCase , _UpperCAmelCase : List[Any] =jax.random.split(snake_case)
_UpperCAmelCase : str ={'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case)["params"]
    def setup( self ) -> None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =self.block_out_channels
_UpperCAmelCase : Tuple =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCAmelCase : Optional[Any] =self.num_attention_heads or self.attention_head_dim
# input
_UpperCAmelCase : Tuple =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCAmelCase : Union[str, Any] =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
_UpperCAmelCase : str =FlaxTimestepEmbedding(snake_case , dtype=self.dtype)
_UpperCAmelCase : Optional[Any] =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_UpperCAmelCase : Optional[int] =self.only_cross_attention
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Dict =(only_cross_attention,) * len(self.down_block_types)
if isinstance(snake_case , snake_case):
_UpperCAmelCase : Optional[Any] =(num_attention_heads,) * len(self.down_block_types)
# down
_UpperCAmelCase : int =[]
_UpperCAmelCase : Optional[int] =[]
_UpperCAmelCase : List[str] =block_out_channels[0]
_UpperCAmelCase : int =nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case)
for i, down_block_type in enumerate(self.down_block_types):
_UpperCAmelCase : Tuple =output_channel
_UpperCAmelCase : Dict =block_out_channels[i]
_UpperCAmelCase : str =i == len(snake_case) - 1
if down_block_type == "CrossAttnDownBlock2D":
                _UpperCAmelCase : Tuple =FlaxCrossAttnDownBlock2D(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
                _UpperCAmelCase : Optional[Any] =FlaxDownBlock2D(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case)
for _ in range(self.layers_per_block):
_UpperCAmelCase : Tuple =nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case)
if not is_final_block:
_UpperCAmelCase : List[str] =nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case)
_UpperCAmelCase : List[Any] =down_blocks
_UpperCAmelCase : Optional[Any] =controlnet_down_blocks
# mid
_UpperCAmelCase : int =block_out_channels[-1]
        _UpperCAmelCase : Optional[Any] =FlaxUNetMidBlock2DCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_UpperCAmelCase : Optional[int] =nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_UpperCAmelCase : Optional[int] =jnp.flip(snake_case , axis=1)
# 1. time
if not isinstance(snake_case , jnp.ndarray):
            _UpperCAmelCase : Optional[int] =jnp.array([timesteps] , dtype=jnp.int32)
        elif isinstance(snake_case , jnp.ndarray) and len(timesteps.shape) == 0:
            _UpperCAmelCase : str =timesteps.astype(dtype=jnp.float32)
_UpperCAmelCase : Dict =jnp.expand_dims(snake_case , 0)
_UpperCAmelCase : int =self.time_proj(snake_case)
_UpperCAmelCase : Any =self.time_embedding(snake_case)
# 2. pre-process
_UpperCAmelCase : str =jnp.transpose(snake_case , (0, 2, 3, 1))
_UpperCAmelCase : Any =self.conv_in(snake_case)
_UpperCAmelCase : List[str] =jnp.transpose(snake_case , (0, 2, 3, 1))
_UpperCAmelCase : Optional[int] =self.controlnet_cond_embedding(snake_case)
sample += controlnet_cond
# 3. down
_UpperCAmelCase : Tuple =(sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case):
_UpperCAmelCase , _UpperCAmelCase : Dict =down_block(snake_case , snake_case , snake_case , deterministic=not train)
else:
_UpperCAmelCase , _UpperCAmelCase : Dict =down_block(snake_case , snake_case , deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
_UpperCAmelCase : List[Any] =self.mid_block(snake_case , snake_case , snake_case , deterministic=not train)
# 5. contronet blocks
_UpperCAmelCase : Union[str, Any] =()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks):
_UpperCAmelCase : List[str] =controlnet_block(snake_case)
controlnet_down_block_res_samples += (down_block_res_sample,)
_UpperCAmelCase : Optional[int] =controlnet_down_block_res_samples
_UpperCAmelCase : List[str] =self.controlnet_mid_block(snake_case)
# 6. scaling
_UpperCAmelCase : Tuple =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case)
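# Added usage sketch (comments only; illustrative shapes matching init_weights
# above -- a 4-channel latent, one timestep, text encoder hidden states, and an
# RGB conditioning image at 8x the latent resolution):
# controlnet = FlaxControlNetModel(sample_size=32, dtype=jnp.float32)
# params = controlnet.init_weights(jax.random.PRNGKey(0))
# down_res, mid_res = controlnet.apply(
#     {"params": params}, sample, timesteps, encoder_hidden_states,
#     controlnet_cond, return_dict=False,
# )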
| 242 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCAmelCase__ ( a__: int , a__: List[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = int(a__ )
assert noofclusters < len(a__ )
# Find out the dimensionality
_UpperCAmelCase = len(vectors[0] )
# Will help select random centroids from among the available vectors
_UpperCAmelCase = list(range(len(a__ ) ) )
shuffle(a__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_UpperCAmelCase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_UpperCAmelCase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_UpperCAmelCase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(a__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
_UpperCAmelCase = tf.placeholder('float64' , [dim] )
_UpperCAmelCase = []
for centroid in centroids:
cent_assigns.append(tf.assign(a__ , a__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_UpperCAmelCase = [tf.Variable(0 ) for i in range(len(a__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_UpperCAmelCase = tf.placeholder('int32' )
_UpperCAmelCase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(a__ , a__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_UpperCAmelCase = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_UpperCAmelCase = tf.reduce_mean(a__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
va = tf.placeholder('float' , [dim] )
vb = tf.placeholder('float' , [dim] )
_UpperCAmelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_UpperCAmelCase = tf.placeholder('float' , [noofclusters] )
_UpperCAmelCase = tf.argmin(a__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_UpperCAmelCase = tf.global_variables_initializer()
# Initialize all variables
sess.run(a__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_UpperCAmelCase = 1_0_0
for _ in range(a__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(a__ ) ):
_UpperCAmelCase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_UpperCAmelCase = [
sess.run(a__ , feed_dict={va: vect, vb: sess.run(a__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_UpperCAmelCase = sess.run(
a__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(a__ ):
# Collect all the vectors assigned to this cluster
_UpperCAmelCase = [
vectors[i]
for i in range(len(a__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_UpperCAmelCase = sess.run(
a__ , feed_dict={mean_input: array(a__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_UpperCAmelCase = sess.run(a__ )
_UpperCAmelCase = sess.run(a__ )
return centroids, assignments
| 329 |
import math
lowerCAmelCase__ :Optional[int] = 1_0
lowerCAmelCase__ :Optional[Any] = 7
lowerCAmelCase__ :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( a__: int = 2_0 ) -> str:
'''simple docstring'''
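# By linearity of expectation, the expected number of distinct colours is
# NUM_COLOURS * P(a given colour appears among the drawn balls), where
# P(appears) = 1 - C(NUM_BALLS - BALLS_PER_COLOUR, drawn) / C(NUM_BALLS, drawn).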
_UpperCAmelCase = math.comb(a__ , a__ )
_UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a__ )
_UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
| 329 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
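# Stand-in classes for when the optional backends are missing: every use raises
# an informative ImportError (via `requires_backends`) asking for "flax" and
# "transformers", so the parent package stays importable without them.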
class __magic_name__ ( metaclass=__UpperCamelCase):
UpperCamelCase__ = ["flax", "transformers"]
def __init__( self : List[str] , *lowercase_ : List[Any] , **lowercase_ : Dict ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : str , **lowercase_ : int ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=__UpperCamelCase):
UpperCamelCase__ = ["flax", "transformers"]
def __init__( self : int , *lowercase_ : Optional[int] , **lowercase_ : List[str] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Any , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : List[str] ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=__UpperCamelCase):
UpperCamelCase__ = ["flax", "transformers"]
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : int , **lowercase_ : Tuple ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : int , **lowercase_ : int ):
requires_backends(cls , ["""flax""", """transformers"""] )
class __magic_name__ ( metaclass=__UpperCamelCase):
UpperCamelCase__ = ["flax", "transformers"]
def __init__( self : Any , *lowercase_ : Dict , **lowercase_ : int ):
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : List[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ["""flax""", """transformers"""] )
| 370 |
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : int = 1000000 ) -> int:
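# Counts how many n below the limit admit exactly ten solutions of
# x^2 - y^2 - z^2 = n with x > y > z > 0 in arithmetic progression (Project Euler 135).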
lowercase_ : List[Any] = limit + 1
lowercase_ : Optional[Any] = [0] * limit
for first_term in range(1 , UpperCAmelCase__ ):
for n in range(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[Any] = first_term + n / first_term
if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x, y, z are positive integers
frequency[n] += 1 # so z > 0 and a > d; also 4d < a
lowercase_ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> tuple[float, list[float]]:
"""simple docstring"""
lowerCAmelCase_ : str = list(range(len(__UpperCamelCase ) ) )
lowerCAmelCase_ : List[Any] = [v / w for v, w in zip(__UpperCamelCase , __UpperCamelCase )]
index.sort(key=lambda i : ratio[i] , reverse=True )
lowerCAmelCase_ : float = 0
lowerCAmelCase_ : list[float] = [0] * len(__UpperCamelCase )
for i in index:
if weight[i] <= capacity:
fractions[i] = 1
max_value += value[i]
capacity -= weight[i]
else:
fractions[i] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __lowerCamelCase ( __UpperCamelCase ) -> Dict:
"""simple docstring"""
return EnvironmentCommand()
def __lowerCamelCase ( __UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class __lowerCamelCase ( A__ ):
'''simple docstring'''
@staticmethod
def lowerCamelCase ( a_ : ArgumentParser ):
lowerCAmelCase_ : str = parser.add_parser("env" )
download_parser.set_defaults(func=a_ )
download_parser.add_argument(
"--accelerate-config_file" , default=a_ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=a_ )
def __init__( self : Dict , a_ : Dict , *a_ : str ):
lowerCAmelCase_ : Union[str, Any] = accelerate_config_file
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Optional[int] = "not installed"
if is_safetensors_available():
import safetensors
lowerCAmelCase_ : int = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
lowerCAmelCase_ : Optional[Any] = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
lowerCAmelCase_ : List[Any] = "not installed"
lowerCAmelCase_ : Dict = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase_ : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a_ ):
lowerCAmelCase_ : int = load_config_from_file(self._accelerate_config_file ).to_dict()
lowerCAmelCase_ : Any = (
"\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(a_ , a_ )
else f'''\t{accelerate_config}'''
)
lowerCAmelCase_ : Union[str, Any] = "not installed"
lowerCAmelCase_ : Dict = "NA"
if is_torch_available():
import torch
lowerCAmelCase_ : Tuple = torch.__version__
lowerCAmelCase_ : Union[str, Any] = torch.cuda.is_available()
lowerCAmelCase_ : List[str] = "not installed"
lowerCAmelCase_ : Tuple = "NA"
if is_tf_available():
import tensorflow as tf
lowerCAmelCase_ : Union[str, Any] = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase_ : Tuple = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase_ : List[str] = bool(tf.config.list_physical_devices("GPU" ) )
lowerCAmelCase_ : Optional[Any] = "not installed"
lowerCAmelCase_ : Optional[int] = "not installed"
lowerCAmelCase_ : Tuple = "not installed"
lowerCAmelCase_ : Tuple = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase_ : List[Any] = flax.__version__
lowerCAmelCase_ : Tuple = jax.__version__
lowerCAmelCase_ : List[Any] = jaxlib.__version__
lowerCAmelCase_ : str = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase_ : Dict = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'''{safetensors_version}''',
"Accelerate version": f'''{accelerate_version}''',
"Accelerate config": f'''{accelerate_config_str}''',
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''',
"Jax version": f'''{jax_version}''',
"JaxLib version": f'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a_ ) )
return info
@staticmethod
def lowerCamelCase ( a_ : Tuple ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 241 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ :Any = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
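# Accept a tensor, a single PIL image, or a list of PIL images; PIL input is
# resized, converted to tensors, normalized to [-1, 1], and stacked into a batch.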
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
lowercase = [image]
lowercase = [trans(img.convert('''RGB''' ) ) for img in image]
lowercase = torch.stack(lowerCAmelCase__ )
return image
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__):
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=A__ ,scheduler=A__)
def A__ ( self ,A__):
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}')
def A__ ( self ,A__ ,A__ ,A__):
# get the original timestep using init_timestep
lowercase = min(int(num_inference_steps * strength) ,A__)
lowercase = max(num_inference_steps - init_timestep ,0)
lowercase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__=None):
if not isinstance(A__ ,(torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A__)}')
lowercase = image.to(device=A__ ,dtype=A__)
if isinstance(A__ ,A__) and len(A__) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(A__)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
lowercase = init_latents.shape
lowercase = randn_tensor(A__ ,generator=A__ ,device=A__ ,dtype=A__)
# get latents
print('''add noise to latents at timestep''' ,A__)
lowercase = self.scheduler.add_noise(A__ ,A__ ,A__)
lowercase = init_latents
return latents
@torch.no_grad()
def __call__( self ,A__ = None ,A__ = 0.8 ,A__ = 1 ,A__ = None ,A__ = 0.0 ,A__ = 5_0 ,A__ = None ,A__ = "pil" ,A__ = True ,):
self.check_inputs(A__)
# 2. Preprocess image
lowercase = preprocess(A__)
# 3. set timesteps
self.scheduler.set_timesteps(A__ ,device=self.device)
lowercase , lowercase = self.get_timesteps(A__ ,A__ ,self.device)
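# Keep only the first (noisiest) retained timestep, one copy per batch item;
# the latents are noised to this point so denoising starts partway through
# the schedule, as controlled by `strength`.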
lowercase = timesteps[:1].repeat(A__)
# 4. Prepare latent variables
lowercase = self.prepare_latents(A__ ,A__ ,A__ ,self.unet.dtype ,self.device ,A__)
lowercase = latents
# 5. Denoising loop
for t in self.progress_bar(A__):
# 1. predict noise model_output
lowercase = self.unet(A__ ,A__).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase = self.scheduler.step(
A__ ,A__ ,A__ ,eta=A__ ,use_clipped_model_output=A__ ,generator=A__ ,).prev_sample
lowercase = (image / 2 + 0.5).clamp(0 ,1)
lowercase = image.cpu().permute(0 ,2 ,3 ,1).numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(A__)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=A__)
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ :Any = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase__ :List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
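# Task template for language-modeling datasets: the schema requires a single
# string column, exposed to consumers under the canonical name "text".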
@dataclass(frozen=UpperCamelCase__ )
class _a ( UpperCamelCase__ ):
_lowercase : str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features({} )
_lowercase : str = "text"
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.text_column: "text"}
| 110 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
lowerCAmelCase = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = None
# source code of `config_class`
lowercase__ = inspect.getsource(SCREAMING_SNAKE_CASE )
lowercase__ = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowercase__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowercase__ = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
lowercase__ = ckpt_name
break
return checkpoint
def _a ( ):
"""simple docstring"""
lowercase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowercase__ = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE )
lowercase__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '''\n'''.join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 110 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( a_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = FunnelTokenizer
_UpperCamelCase : str = FunnelTokenizerFast
_UpperCamelCase : str = True
_UpperCamelCase : int = True
def __A ( self ):
super().setUp()
_lowerCAmelCase : Optional[int] = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def __A ( self , **a__ ):
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """UNwant\u00E9d,running"""
_lowerCAmelCase : str = """unwanted, running"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : int = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(a__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self ):
_lowerCAmelCase : str = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
_lowerCAmelCase : List[Any] = tokenizer("""UNwant\u00E9d,running""" )
_lowerCAmelCase : int = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
_lowerCAmelCase : Tuple = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 350 | """simple docstring"""
from manim import *
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self ):
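# Manim scene: lays out memory cells for the CPU, GPU, model, and a loaded
# checkpoint, then animates checkpoint weights being moved into CPU memory
# shard by shard.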
_lowerCAmelCase : Any = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Tuple = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[Any] = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Dict = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Dict = [mem.copy() for i in range(4 )]
_lowerCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Tuple = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Any = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : List[str] = Text("""Model""" , font_size=24 )
_lowerCAmelCase : Any = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCAmelCase : Tuple = []
for i, rect in enumerate(a__ ):
rect.set_stroke(a__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase : List[str] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=a__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=a__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=a__ , buff=0.0 )
self.add(a__ )
cpu_targs.append(a__ )
_lowerCAmelCase : Any = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , aligned_edge=a__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : List[str] = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : int = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase : List[Any] = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) , Write(a__ ) )
self.play(Write(a__ , run_time=1 ) , Create(a__ , run_time=1 ) )
_lowerCAmelCase : int = []
_lowerCAmelCase : List[Any] = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : Tuple = fill.copy().set_fill(a__ , opacity=0.7 )
target.move_to(a__ )
first_animations.append(GrowFromCenter(a__ , run_time=1 ) )
_lowerCAmelCase : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(a__ , run_time=1.5 ) )
self.play(*a__ )
self.play(*a__ )
self.wait()
| 126 | 0 |