code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self):
lowercase = 1
lowercase = 3
lowercase = (3_2, 3_2)
lowercase = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0)).to(A__)
return image
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=3_2 ,)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def A__ ( self):
torch.manual_seed(0)
lowercase = RobertaSeriesConfig(
hidden_size=3_2 ,project_dim=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5_0_0_6 ,)
return RobertaSeriesModelWithTransformation(A__)
@property
def A__ ( self):
def extract(*A__ ,**A__):
class lowercase :
def __init__( self):
lowercase = torch.ones([0])
def A__ ( self ,A__):
self.pixel_values.to(A__)
return self
return Out()
return extract
def A__ ( self):
lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=A__)
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
lowercase = 7_7
lowercase = self.dummy_image.to(A__)
lowercase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=A__ ,scheduler=A__ ,vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,safety_checker=A__ ,feature_extractor=self.dummy_extractor ,)
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=A__)
lowercase = alt_pipe.to(A__)
alt_pipe.set_progress_bar_config(disable=A__)
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,)
lowercase = output.images
lowercase = torch.Generator(device=A__).manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,return_dict=A__ ,)[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def A__ ( self):
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=A__)
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
lowercase = 7_7
lowercase = self.dummy_image.to(A__)
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = AltDiffusionImgaImgPipeline(
unet=A__ ,scheduler=A__ ,vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,safety_checker=A__ ,feature_extractor=self.dummy_extractor ,)
lowercase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=A__)
lowercase = alt_pipe.to(A__)
alt_pipe.set_progress_bar_config(disable=A__)
lowercase = '''A painting of a squirrel eating a burger'''
lowercase = torch.manual_seed(0)
lowercase = alt_pipe(
[prompt] ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,image=A__ ,).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''')
def A__ ( self):
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase = init_image.resize((7_6_0, 5_0_4))
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
A__ ,safety_checker=A__ ,)
pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0)
lowercase = pipe(
prompt=A__ ,image=A__ ,strength=0.75 ,guidance_scale=7.5 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
lowercase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowercase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self):
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
lowercase = init_image.resize((7_6_8, 5_1_2))
lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''')
lowercase = '''BAAI/AltDiffusion'''
lowercase = AltDiffusionImgaImgPipeline.from_pretrained(
A__ ,safety_checker=A__ ,)
pipe.to(A__)
pipe.set_progress_bar_config(disable=A__)
pipe.enable_attention_slicing()
lowercase = '''A fantasy landscape, trending on artstation'''
lowercase = torch.manual_seed(0)
lowercase = pipe(
prompt=A__ ,image=A__ ,strength=0.75 ,guidance_scale=7.5 ,generator=A__ ,output_type='''np''' ,)
lowercase = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image).max() < 1E-2
| 101 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : str = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : str = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Dict = False
_UpperCAmelCase : Union[str, Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=lowercase , has_text_modality=lowercase )
def A ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def A ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : str ):
_snake_case = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(lowercase , lowercase ) )
_snake_case = outputs.hidden_states
_snake_case = 16
self.assertEqual(len(lowercase ) , lowercase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowercase )
@slow
def A ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def a_ ( ) -> Union[str, Any]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[Any] ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(lowercase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
# verify the logits
_snake_case = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = torch.tensor([0.2445, -1.1993, 0.1905] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = model.to(lowercase )
_snake_case = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='pt' ).to(lowercase )
# forward pass
with torch.no_grad():
_snake_case = model(**lowercase )
_snake_case = outputs.logits
# verify the logits
_snake_case = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , lowercase )
_snake_case = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) ) | 282 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowercase ( _snake_case : str = "" ) ->dict[str, float]:
"""simple docstring"""
__snake_case : List[Any] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
__snake_case : Dict = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
__snake_case : Union[str, Any] = soup.find_all('''td''' , attrs='''titleColumn''' )
__snake_case : int = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_snake_case , _snake_case )
}
def lowercase ( _snake_case : str = "IMDb_Top_250_Movies.csv" ) ->None:
"""simple docstring"""
__snake_case : List[Any] = get_imdb_top_aaa_movies()
with open(_snake_case , '''w''' , newline='''''' ) as out_file:
__snake_case : Dict = csv.writer(_snake_case )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 102 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
_snake_case = nn.Parameter(__lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
_snake_case = nn.Parameter(__lowercase )
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weighs
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 0 |
import argparse
import os
import re
import packaging.version
A__ : Dict = '''examples/'''
A__ : Any = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
A__ : Any = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
A__ : Any = '''README.md'''
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any] ,__UpperCamelCase : List[Any] ):
with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCAmelCase_ : Tuple = f.read()
lowerCAmelCase_ , lowerCAmelCase_ : Dict = REPLACE_PATTERNS[pattern]
lowerCAmelCase_ : Tuple = replace.replace('''VERSION''' ,__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase )
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ):
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern='''examples''' )
def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : List[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def UpperCamelCase( ):
lowerCAmelCase_ : List[str] = '''🤗 Transformers currently provides the following architectures'''
lowerCAmelCase_ : List[Any] = '''1. Want to contribute a new model?'''
with open(__UpperCamelCase ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCAmelCase_ : Union[str, Any] = f.readlines()
# Find the start of the list.
lowerCAmelCase_ : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowerCAmelCase_ : int = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__UpperCamelCase )
def UpperCamelCase( ):
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
lowerCAmelCase_ : Optional[Any] = f.read()
lowerCAmelCase_ : Dict = REPLACE_PATTERNS['''init'''][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def UpperCamelCase( __UpperCamelCase : Dict=False ):
lowerCAmelCase_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowerCAmelCase_ : List[str] = default_version.base_version
elif patch:
lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowerCAmelCase_ : int = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowerCAmelCase_ : Optional[Any] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(__UpperCamelCase ) == 0:
lowerCAmelCase_ : List[str] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(__UpperCamelCase ,patch=__UpperCamelCase )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def UpperCamelCase( ):
lowerCAmelCase_ : Any = get_version()
lowerCAmelCase_ : int = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowerCAmelCase_ : Optional[Any] = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase_ : Optional[Any] = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(__UpperCamelCase ) == 0:
lowerCAmelCase_ : int = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(__UpperCamelCase )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
A__ : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 103 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Extract a standalone (dense) pruned checkpoint from a fine-pruned model.

    Copies structural tensors verbatim, binarizes the learned mask scores with
    the chosen pruning method, multiplies each weight by its mask, and saves
    the resulting state dict next to the source model (or at
    ``args.target_model_path``). Restores the names the obfuscated original
    dropped (`pruning_method`, `pruned_model`, ...) — every one of them was
    read but never bound — and renames the function to ``main`` to match the
    ``main(args)`` call in the ``__main__`` guard.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # sigmoid=True: compare sigmoid(scores) against the threshold
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval for L0 regularization.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'''bertarized_{os.path.basename(model_name_or_path)}''')

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'''\nCreated folder {target_model_path}''')

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    # CLI for the bertarization script; the original never bound `parser` or
    # `args` (assigned to `_lowerCamelCase`), and the last line carried
    # dataset-residue junk ("| 282 | 0 |") that broke the syntax.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    args = parser.parse_args()
    main(args)
'''simple docstring'''
def _A ( A__ ):
"""simple docstring"""
assert (
isinstance(A__ , A__ ) and number_of_steps > 0
), F"number_of_steps needs to be positive integer, your input {number_of_steps}"
if number_of_steps == 1:
return 1
__lowercase , __lowercase = 1, 1
for _ in range(number_of_steps - 1 ):
__lowercase , __lowercase = current + previous, current
return current
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 104 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__:
    """Mixin for testing UNet down/mid/up blocks.

    Subclasses are expected to provide `block_class` and `block_type`
    ("down", "mid" or "up"). The obfuscated original named every method `A`
    (so they shadowed each other and `self.get_dummy_input()` could not
    resolve) and duplicated the parameter name `lowercase`, which is a
    SyntaxError; real names are restored below.
    """

    @property
    def dummy_input(self):
        # Default dummy input for the block under test.
        return self.get_dummy_input()

    @property
    def output_shape(self):
        # Expected (batch, channels, h, w) of the block output, per block type.
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a deterministic dict of inputs for the block's forward pass."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {'hidden_states': hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            generator_1 = torch.manual_seed(1)
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) for the block type."""
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32
        if self.block_type == "mid":
            # Mid blocks infer out_channels from in_channels.
            init_dict.pop('out_channels')

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Check output shape and a 3x3 corner slice against `expected_slice`."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        """Smoke-test a backward pass through the block."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for Data2VecAudio models.

    The obfuscated original bound every constructor argument to the local
    name `a` instead of `self.<attr>`, so no attribute was ever stored and
    `len(self.conv_dim)` raised AttributeError; it also reused one parameter
    name for all arguments (a SyntaxError). Real names are restored.
    """

    # Required by the AutoConfig registry.
    model_type = """data2vec-audio"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Overall downsampling factor of the convolutional feature extractor.
        return math.prod(self.conv_stride)
| 105 |
# Doomsday-of-the-month tables (index 0 = January) for leap / non-leap years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def a_(year, month, day):
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    Fixes two defects of the obfuscated original: every local was bound to
    `_snake_case` while being read under its real name, and a centurial year
    divisible by 400 (e.g. 2000) wrongly used the non-leap table — the
    Gregorian rule makes such years leap, so the test must be `% 400 != 0`.
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Centurial years (centurian == 0) are leap only when divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Run the doctests when executed directly (trailing dataset-residue
    # junk on the last line of the original removed).
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8  # bits per channel of the analog-bits representation


def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor with values in [0, 1] to an analog-bits tensor in {-1, 1}.

    Input shape (b, c, h, w) becomes (b, c * bits, h, w): each channel is
    quantized to an 8-bit integer and expanded into its binary digits
    (MSB first), mapped to -1/+1. Equivalent to the original einops-based
    version but written with plain torch ops (the original also repeated the
    parameter name `A_`, a SyntaxError, and bound every local to the same
    throwaway name).
    """
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = mask.view(-1, 1, 1)                     # 'd -> d 1 1'
    x = x.unsqueeze(2)                             # 'b c h w -> b c 1 h w'

    bit_tensor = ((x & mask) != 0).float()
    bit_tensor = bit_tensor.flatten(1, 2)          # 'b c d h w -> b (c d) h w'
    bit_tensor = bit_tensor * 2 - 1                # {0, 1} -> {-1, 1}
    return bit_tensor
def bits_to_decimal(x, bits=8):  # default matches module-level BITS
    """Convert an analog-bits tensor back to an image tensor in [0, 1].

    Inverse of `decimal_to_bits`: input (b, c * bits, h, w) with sign-coded
    bits becomes (b, c, h, w) by thresholding at 0, reassembling each group
    of `bits` channels into an integer (MSB first) and dividing by 255.
    Written with plain torch ops instead of einops; the obfuscated original
    duplicated the parameter name `A_` (SyntaxError).
    """
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = mask.view(-1, 1, 1)                                  # 'd -> d 1 1'
    b, _, h, w = x.shape
    x = x.view(b, -1, bits, h, w)                               # 'b (c d) h w -> b c d h w'
    dec = (x * mask).sum(dim=2)                                 # reduce over the bit axis
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=True, generator=None, return_dict=True):
    """DDIM step that clips the predicted x_0 to ±self.bit_scale (bit-space variant).

    Monkeypatched onto a DDIMScheduler instance by the pipeline below, so
    `self` is the scheduler. The obfuscated original repeated the parameter
    name `A_` (SyntaxError) and bound every local to the same throwaway name;
    the real names restored here match the DDIM notation comments that were
    left intact in the original body.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, formula (12)
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to the analog-bits range.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12)
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else '''cpu'''
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(self, model_output, timestep, sample, prediction_type="epsilon", generator=None, return_dict=True):
    """DDPM step that clips the predicted x_0 to ±self.bit_scale (bit-space variant).

    Monkeypatched onto a DDPMScheduler instance by the pipeline below, so
    `self` is the scheduler. Restores the real parameter/local names the
    obfuscation destroyed (duplicate `A_` parameters were a SyntaxError).
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample, formula (15) of https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.')

    # 3. Clip "predicted x_0" to the analog-bits range.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t, formula (7)
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class SCREAMING_SNAKE_CASE(DiffusionPipeline):
    """Bit Diffusion pipeline: runs the diffusion process in an analog-bits image representation.

    The obfuscated original repeated the parameter name `lowercase_`
    (SyntaxError), referenced the bit-scheduler step functions by their real
    names (restored above), and left the scheduler patch target unbound.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # Replace the scheduler's step with the bit-space variant that clips
        # x_0 to ±bit_scale. __get__ binds the function so `self` inside the
        # step is the scheduler instance.
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = step_fn.__get__(scheduler)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample `batch_size` images of size (height, width); returns PIL images by default."""
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator,)
        # Diffuse in bit space, scaled by bit_scale.
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 106 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Checks that one epoch of training is identical under DDPM and DDIM noising.

    The obfuscated original named both methods `A` (so the helper shadowed the
    test and unittest could never discover it) and read `ddpm_scheduler`,
    `clean_images`, `loss`, ... without ever binding them; real names restored.
    """

    def get_model_optimizer(self, resolution=32):
        # Deterministically seeded fresh model + SGD optimizer.
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # NOTE(review): the original passed an unbound name for clip_sample;
        # True assumed here — confirm against the upstream test.
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True,)
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='linear', clip_sample=True,)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


class snake_case__(PretrainedConfig):
    """Configuration for the RetriBERT dense-retrieval model.

    The obfuscated original bound every constructor argument to the local
    name `a` instead of `self.<attr>`, so the config stored nothing.
    """

    # Required by the AutoConfig registry.
    model_type = """retribert"""

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 107 |
import numpy as np
def a_(vector: np.array) -> np.array:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x).

    The original declared the parameter as `__lowercase` while the body read
    `vector`, raising NameError on every call; the parameter name is restored.
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
    # Run the doctests when executed directly (trailing dataset-residue
    # junk on the last line of the original removed).
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None):
    """Cast every tensor in a saved state dict to fp16 and save it back.

    When `save_path` is None the source file is overwritten in place.
    Raises TypeError if any value in the checkpoint is not a tensor.
    Renamed from the obfuscated `a__` to `convert`, which is the name the
    `fire.Fire(convert)` entry point below expects; the original also
    repeated the parameter name (SyntaxError). The tqdm progress wrapper
    (cosmetic only) is dropped.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in state_dict.items():
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    # CLI entry point: expose `convert` via python-fire.
    fire.Fire(convert)
| 108 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 0 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """Binary-tree node holding an integer payload and left/right children.

    The obfuscated original assigned the constructor values to a local name
    instead of `self.data`/`self.left`/`self.right`, and named the class so
    that every `TreeNode(...)` reference below failed; both are restored.
    """

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def build_tree():
    """Interactively build a binary tree breadth-first from stdin; 'n' stops input."""
    print("""\n********Press N to stop entering at any point of time********\n""")
    check = input("""Enter the value of the root node: """).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode):
    """Recursive pre-order traversal, printing 'data,' per node."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=""",""")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode):
    """Recursive in-order traversal, printing 'data,' per node."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=""",""")
    in_order(node.right)


def post_order(node: TreeNode):
    """Recursive post-order traversal, printing 'data,' per node."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=""",""")


def level_order(node: TreeNode):
    """Breadth-first traversal, printing 'data,' per node."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=""",""")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode):
    """Breadth-first traversal printing each depth level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=""",""")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for inner_node in list_:
            q.put(inner_node)


def pre_order_iter(node: TreeNode):
    """Iterative (stack-based) pre-order traversal."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=""",""")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode):
    """Iterative (stack-based) in-order traversal."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=""",""")
        n = n.right


def post_order_iter(node: TreeNode):
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stacka_out = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka_out.append(n)
    while stacka_out:  # pop up from stack2 will be the post order
        print(stacka_out.pop().data, end=""",""")


def prompt(s: str = "", width=50, char="*") -> str:
    """Return *s* centered in a banner of *width* characters filled with *char*."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
    # Demo driver: build a tree interactively, then print every traversal.
    # The original bound the tree to `A` but passed `node` to every
    # traversal (NameError); the name is fixed.
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 5_0 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 109 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # No sentencepiece backend available: the fast tokenizer then has no
    # slow counterpart to convert from.
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

# Names restored: the class below reads VOCAB_FILES_NAMES etc., but the
# obfuscated original bound all of these to `_lowerCamelCase`.
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Fast (tokenizers-backed) PEGASUS tokenizer.

    Reconstructed from the obfuscated original, which was not importable:
    every class attribute was assigned to the duplicate name `_UpperCAmelCase`
    and every method declared duplicate `lowercase` parameters (a
    SyntaxError), while the bodies read names (`offset`,
    `additional_special_tokens`, `save_directory`, ...) that were never bound.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        # `offset` reserves ids for <pad>, </s>, <mask_1>, <mask_2> and the
        # <unk_2> ... <unk_102> placeholder tokens.
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # The slow (sentencepiece) tokenizer can only be re-created when the
        # original spiece model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 list marking which ids in `seq` are special tokens."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a mask of special tokens; [1] is appended for the trailing EOS."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS to a sequence (or a concatenated pair) of token ids."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory`.

        Raises ValueError when no spiece file is attached to this fast
        tokenizer; logs and returns when `save_directory` is not a directory.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
    """Tokenization tests for LED (slow `LEDTokenizer` / fast `LEDTokenizerFast`).

    NOTE(review): the base class ``__lowerCamelCase`` is never defined in this
    module — presumably it should be ``TokenizerTesterMixin`` (imported above);
    confirm against the original test suite.
    NOTE(review): all three class attributes share the name ``lowerCAmelCase__``
    and every method shares the name ``UpperCAmelCase__``, so each definition
    silently overwrites the previous one; unittest will discover no tests and
    the mixin's expected attributes (tokenizer_class, rust_tokenizer_class, ...)
    are missing. Locals are likewise all bound to ``__snake_case`` while read
    under other names (``A``, ``kwargs``, ``batch``, ...) — these bodies cannot
    run as written.
    """

    # By position these look like: tokenizer_class, rust_tokenizer_class,
    # test_rust_tokenizer — each assignment clobbers the one before it.
    lowerCAmelCase__ = LEDTokenizer
    lowerCAmelCase__ = LEDTokenizerFast
    lowerCAmelCase__ = True

    def UpperCAmelCase__ ( self : List[Any] ):
        """setUp: write a tiny BPE vocab + merges pair into the temp dir."""
        super().setUp()
        __snake_case: Tuple = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        # NOTE(review): `A` is undefined here — the vocab list above was bound
        # to `__snake_case`, not `A`.
        __snake_case: str = dict(zip(A , range(len(A ) ) ) )
        __snake_case: List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __snake_case: List[str] = {"""unk_token""": """<unk>"""}
        __snake_case: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(A ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(A ) )

    def UpperCAmelCase__ ( self : Dict , **A : Optional[int] ):
        """Build a slow tokenizer from the temp vocab (a get_tokenizer helper)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )

    def UpperCAmelCase__ ( self : List[str] , **A : List[str] ):
        """Build a fast tokenizer from the temp vocab (a get_rust_tokenizer helper)."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A )

    def UpperCAmelCase__ ( self : Union[str, Any] , A : List[Any] ):
        """Return (input_text, output_text) for the common tokenizer tests."""
        return "lower newer", "lower newer"

    @cached_property
    def UpperCAmelCase__ ( self : List[str] ):
        """Default slow tokenizer loaded from the hub checkpoint."""
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )

    @cached_property
    def UpperCAmelCase__ ( self : List[Any] ):
        """Default fast tokenizer loaded from the hub checkpoint."""
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )

    @require_torch
    def UpperCAmelCase__ ( self : int ):
        """Batch-encode two sentences; expect (2, 9) shapes and known ids."""
        __snake_case: str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __snake_case: List[str] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: Optional[int] = tokenizer(A , max_length=len(A ) , padding=A , return_tensors="""pt""" )
            self.assertIsInstance(A , A )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __snake_case: Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(A , A )

    @require_torch
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Encoding without targets must not produce labels/decoder masks."""
        __snake_case: Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: int = tokenizer(A , padding=A , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , A )
            self.assertIn("""attention_mask""" , A )
            self.assertNotIn("""labels""" , A )
            self.assertNotIn("""decoder_attention_mask""" , A )

    @require_torch
    def UpperCAmelCase__ ( self : Optional[Any] ):
        """Target texts padded to max_length=32."""
        __snake_case: List[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: Dict = tokenizer(text_target=A , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def UpperCAmelCase__ ( self : List[Any] ):
        """Very long input is truncated to the model max length (5122)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: Optional[int] = tokenizer(
                ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=A , truncation=A , return_tensors="""pt""" )
            self.assertIsInstance(A , A )
            self.assertEqual(batch.input_ids.shape , (2, 5_122) )

    @require_torch
    def UpperCAmelCase__ ( self : str ):
        """Inputs and targets both start with BOS and end with EOS."""
        __snake_case: int = ["""A long paragraph for summarization."""]
        __snake_case: int = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: Optional[Any] = tokenizer(A , return_tensors="""pt""" )
            __snake_case: Union[str, Any] = tokenizer(text_target=A , return_tensors="""pt""" )
            __snake_case: int = inputs["""input_ids"""]
            __snake_case: str = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def UpperCAmelCase__ ( self : Optional[int] ):
        """`pad` must also pad a provided global_attention_mask."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __snake_case: Union[str, Any] = ["""Summary of the text.""", """Another summary."""]
            __snake_case: Union[str, Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __snake_case: int = tokenizer(A , padding=A )
            __snake_case: str = [[0] * len(A ) for x in encoded_output["""input_ids"""]]
            __snake_case: List[Any] = tokenizer.pad(A )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , A )

    def UpperCAmelCase__ ( self : int ):
        # Intentionally skipped in the original test suite.
        pass

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """Slow and fast tokenizers agree on embedded special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __snake_case: int = self.rust_tokenizer_class.from_pretrained(A , **A )
                __snake_case: Any = self.tokenizer_class.from_pretrained(A , **A )
                __snake_case: Dict = """A, <mask> AllenNLP sentence."""
                __snake_case: int = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
                __snake_case: List[str] = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __snake_case: Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __snake_case: Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 111 |
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at `x` by direct summation of c_i * x**i.

    `poly[i]` is the coefficient of x**i (ascending powers). Restored the
    name `evaluate_poly` that the __main__ block calls; the obfuscated
    original named both functions in this module `a_`, so the second
    definition shadowed this one.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at `x` with Horner's method.

    One multiply-add per coefficient (O(n), no exponentiation). `poly[i]`
    is the coefficient of x**i. Restored the name `horner` that the
    __main__ block calls (the obfuscated original reused the name `a_`).
    """
    result = 0.0
    # Fold from the highest power down: ((...)*x + c1)*x + c0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo: evaluate 5x^2 + 9.3x^3 + 7x^4 at x = 10 with both methods.
    # Fixed: the obfuscated original bound both constants to the same name
    # `_lowerCamelCase` while the calls below read `poly` and `x` (NameError).
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """SamProcessor tests against the PyTorch backend.

    NOTE(review): every method is named ``SCREAMING_SNAKE_CASE`` so each
    ``def`` overwrites the previous one and unittest discovers no tests.
    Locals are all bound to ``UpperCAmelCase`` while read under other names
    (``processor``, ``masks``, ``image_inputs``, ...), and ``_SCREAMING_SNAKE_CASE``
    is read without being defined — these bodies cannot run as written.
    """

    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''setUp: save a fresh SamProcessor into a temp dir.'''
        UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
        UpperCAmelCase : Tuple = SamImageProcessor()
        UpperCAmelCase : Any = SamProcessor(_SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor

    def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        '''tearDown: remove the temp dir.'''
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''Build a list with one random PIL image (3x30x400, uint8).'''
        UpperCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase : Tuple = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''Save then reload with extra kwargs; configs must match.'''
        UpperCAmelCase : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase : Tuple = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        UpperCAmelCase : int = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''Processor output must match the bare image processor output.'''
        UpperCAmelCase : Any = self.get_image_processor()
        UpperCAmelCase : int = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : int = self.prepare_image_inputs()
        UpperCAmelCase : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        UpperCAmelCase : str = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_torch
    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''post_process_masks upscales masks to the original image size.'''
        UpperCAmelCase : str = self.get_image_processor()
        UpperCAmelCase : str = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : Optional[int] = [torch.ones((1, 3, 5, 5) )]
        UpperCAmelCase : Tuple = [[1764, 2646]]
        UpperCAmelCase : int = [[683, 1024]]
        UpperCAmelCase : List[Any] = processor.post_process_masks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        UpperCAmelCase : List[str] = processor.post_process_masks(
            _SCREAMING_SNAKE_CASE , torch.tensor(_SCREAMING_SNAKE_CASE ) , torch.tensor(_SCREAMING_SNAKE_CASE ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        UpperCAmelCase : int = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase : str = processor.post_process_masks(_SCREAMING_SNAKE_CASE , np.array(_SCREAMING_SNAKE_CASE ) , np.array(_SCREAMING_SNAKE_CASE ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        UpperCAmelCase : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase : List[str] = processor.post_process_masks(_SCREAMING_SNAKE_CASE , np.array(_SCREAMING_SNAKE_CASE ) , np.array(_SCREAMING_SNAKE_CASE ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """SamProcessor tests against the TensorFlow backend.

    NOTE(review): as with the PyTorch variant above, every method shares the
    name ``SCREAMING_SNAKE_CASE`` (later defs overwrite earlier ones) and
    locals are bound to ``UpperCAmelCase`` while read under other names —
    these bodies cannot run as written.
    """

    def SCREAMING_SNAKE_CASE ( self ) -> str:
        '''setUp: save a fresh SamProcessor into a temp dir.'''
        UpperCAmelCase : str = tempfile.mkdtemp()
        UpperCAmelCase : Union[str, Any] = SamImageProcessor()
        UpperCAmelCase : List[str] = SamProcessor(_SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor

    def SCREAMING_SNAKE_CASE ( self ) -> Dict:
        '''tearDown: remove the temp dir.'''
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''Build a list with one random PIL image (3x30x400, uint8).'''
        UpperCAmelCase : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        '''Save then reload with extra kwargs; configs must match.'''
        UpperCAmelCase : Dict = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        UpperCAmelCase : List[str] = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        UpperCAmelCase : int = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )

    def SCREAMING_SNAKE_CASE ( self ) -> str:
        '''Processor output must match the bare image processor output.'''
        UpperCAmelCase : Optional[Any] = self.get_image_processor()
        UpperCAmelCase : List[Any] = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : Tuple = self.prepare_image_inputs()
        UpperCAmelCase : Dict = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        UpperCAmelCase : Any = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    @require_tf
    def SCREAMING_SNAKE_CASE ( self ) -> Any:
        '''post_process_masks upscales masks to the original image size (TF).'''
        UpperCAmelCase : List[Any] = self.get_image_processor()
        UpperCAmelCase : int = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
        UpperCAmelCase : Optional[Any] = [[1764, 2646]]
        UpperCAmelCase : Union[str, Any] = [[683, 1024]]
        UpperCAmelCase : Optional[int] = processor.post_process_masks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        UpperCAmelCase : Any = processor.post_process_masks(
            _SCREAMING_SNAKE_CASE , tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) , tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) , return_tensors="""tf""" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        UpperCAmelCase : Optional[Any] = [np.ones((1, 3, 5, 5) )]
        UpperCAmelCase : Union[str, Any] = processor.post_process_masks(
            _SCREAMING_SNAKE_CASE , np.array(_SCREAMING_SNAKE_CASE ) , np.array(_SCREAMING_SNAKE_CASE ) , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        UpperCAmelCase : Tuple = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            UpperCAmelCase : Optional[Any] = processor.post_process_masks(
                _SCREAMING_SNAKE_CASE , np.array(_SCREAMING_SNAKE_CASE ) , np.array(_SCREAMING_SNAKE_CASE ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Cross-framework (PT vs TF) equivalence tests for SamProcessor.

    NOTE(review): same obfuscation defects as the classes above — duplicate
    method names and locals all bound to ``UpperCAmelCase`` while read under
    other names; these bodies cannot run as written.
    """

    def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        '''setUp: save a fresh SamProcessor into a temp dir.'''
        UpperCAmelCase : List[str] = tempfile.mkdtemp()
        UpperCAmelCase : str = SamImageProcessor()
        UpperCAmelCase : List[Any] = SamProcessor(_SCREAMING_SNAKE_CASE )
        processor.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Tuple:
        '''Reload the saved processor and return its image processor.'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor

    def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        '''tearDown: remove the temp dir.'''
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        '''Build a list with one random PIL image (3x30x400, uint8).'''
        UpperCAmelCase : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        UpperCAmelCase : str = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def SCREAMING_SNAKE_CASE ( self ) -> int:
        '''post_process_masks must agree between the TF and PT paths.'''
        UpperCAmelCase : Any = self.get_image_processor()
        UpperCAmelCase : Any = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : List[str] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        UpperCAmelCase : List[Any] = [tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )]
        UpperCAmelCase : Dict = [torch.tensor(_SCREAMING_SNAKE_CASE )]
        UpperCAmelCase : Dict = [[1764, 2646]]
        UpperCAmelCase : List[Any] = [[683, 1024]]
        UpperCAmelCase : Dict = processor.post_process_masks(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
        UpperCAmelCase : Optional[Any] = processor.post_process_masks(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        '''Pixel values must agree between PT and TF, via processor or not.'''
        UpperCAmelCase : List[str] = self.get_image_processor()
        UpperCAmelCase : str = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
        UpperCAmelCase : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )["""pixel_values"""].numpy()
        UpperCAmelCase : int = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )["""pixel_values"""].numpy()
        UpperCAmelCase : Tuple = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )["""pixel_values"""].numpy()
        UpperCAmelCase : int = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
        self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
        self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
| 109 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
    '''Model tester that builds small LiLT configs/inputs for the unit tests.

    NOTE(review): the obfuscated signatures below declare the parameter name
    ``lowercase`` multiple times, which is a SyntaxError; the bodies read the
    original parameter names (``parent``, ``batch_size``, ``config`` ...) that
    are no longer bound. The original parameter lists need to be restored
    before this module can even be imported.
    '''
    def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
        '''Store every hyperparameter of the tiny test model.'''
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_input_mask
        _snake_case = use_token_type_ids
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = type_vocab_size
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = num_labels
        _snake_case = scope
        _snake_case = range_bbox

    def A ( self : List[Any] ):
        '''Build random input ids, legal bounding boxes, masks and labels.'''
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        # (swap coordinates so x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _snake_case = bbox[i, j, 3]
                    _snake_case = bbox[i, j, 1]
                    _snake_case = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _snake_case = bbox[i, j, 2]
                    _snake_case = bbox[i, j, 0]
                    _snake_case = t

        _snake_case = None
        if self.use_input_mask:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        _snake_case = None
        if self.use_token_type_ids:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _snake_case = None
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        _snake_case = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def A ( self : List[str] ):
        '''Return a LiltConfig built from the stored hyperparameters.'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
        '''Run the base LiltModel and check output shapes.'''
        _snake_case = LiltModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
        '''Run LiltForTokenClassification and check the logits shape.'''
        _snake_case = self.num_labels
        _snake_case = LiltForTokenClassification(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
        '''Run LiltForQuestionAnswering and check start/end logits shapes.'''
        _snake_case = LiltForQuestionAnswering(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A ( self : Optional[Any] ):
        '''Repack prepare_config_and_inputs into (config, inputs_dict).'''
        _snake_case = self.prepare_config_and_inputs()
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) = config_and_inputs
        _snake_case = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
    '''Unit tests for the LiLT model family.

    NOTE(review): all class attributes are assigned to the duplicate name
    ``_UpperCAmelCase`` (each clobbering the previous — originally
    all_model_classes, pipeline_model_mapping, fx_compatible,
    test_pruning), all methods share the name ``A``, and the first method
    declares duplicate ``lowercase`` parameters (a SyntaxError). The original
    identifiers need restoring before this class can run.
    '''
    _UpperCAmelCase : List[Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase : List[str] = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Union[str, Any] = False

    def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
        '''Pipeline-test gate: always run (never skip).'''
        return True

    def A ( self : Optional[Any] ):
        '''setUp: create the model tester and a config tester.'''
        _snake_case = LiltModelTester(self )
        _snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )

    def A ( self : Any ):
        '''Run the common config sanity checks.'''
        self.config_tester.run_common_tests()

    def A ( self : Dict ):
        '''Test the base model forward pass.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    def A ( self : List[Any] ):
        '''Test the model under each position-embedding type.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _snake_case = type
            self.model_tester.create_and_check_model(*lowercase )

    def A ( self : Any ):
        '''Test the token-classification head.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase )

    def A ( self : Any ):
        '''Test the question-answering head.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase )

    @slow
    def A ( self : Union[str, Any] ):
        '''Smoke-test loading the pretrained checkpoints from the hub.'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = LiltModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Integration test: run a real LiLT checkpoint on a tiny input and
    compare the first hidden states against golden values.'''
    def A ( self : Tuple ):
        '''Forward 2 tokens + 2 boxes through SCUT-DLVCLab/lilt-roberta-en-base.'''
        _snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
        # NOTE(review): `lowercase` above/below is undefined at module scope —
        # presumably the obfuscation replaced `torch_device`; confirm.
        _snake_case = torch.tensor([[1, 2]] , device=lowercase )
        _snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
        # forward pass
        with torch.no_grad():
            _snake_case = model(input_ids=lowercase , bbox=lowercase )
        _snake_case = torch.Size([1, 2, 768] )
        _snake_case = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
        self.assertTrue(outputs.last_hidden_state.shape , lowercase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) )
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line into a fixed-length batch of size one.

    Restored from the obfuscated original, whose body read undefined names
    (`__lowercase`, `padding_side`, `tokenizer`) and which shared the name
    `UpperCamelCase` with the trim function below; the Dataset class in this
    module calls it as `encode_line`.

    NOTE(review): parameter names/order reconstructed from the upstream
    `examples/research_projects/rag/utils_rag.py` — confirm against it.
    """
    # BART's BPE treats a leading space as significant, so ask for the
    # prefix-space behaviour unless the line already starts with one.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def UpperCamelCase ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop columns that contain only `pad_token_id` in every row.

    Bug fix: the original signature repeated the parameter name `UpperCAmelCase`
    (a SyntaxError); names restored from the call sites below
    (trim_batch(input_ids, pad_token_id, attention_mask=...)).
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


trim_batch = UpperCamelCase  # name used by Seq2SeqDataset's collate method below
class snake_case ( SCREAMING_SNAKE_CASE_ ):
    """Line-by-line seq2seq dataset reading `<type_path>.source` / `<type_path>.target`.

    NOTE(review): placeholder renaming replaced every assignment target with
    `a_` and gave `__init__` nine identically-named parameters (a SyntaxError),
    so the attributes read by the methods (self.src_file, self.tokenizer, ...)
    are never bound. Restore the original names before using this class; only
    documentation is added here.
    """

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="train" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , ) ->Dict:
        super().__init__()
        # Parallel source/target text files for this split.
        a_ = Path(__UpperCAmelCase).joinpath(type_path + ".source")
        a_ = Path(__UpperCAmelCase).joinpath(type_path + ".target")
        a_ = self.get_char_lens(self.src_file)
        a_ = max_source_length
        a_ = max_target_length
        assert min(self.src_lens) > 0, F'''found empty line in {self.src_file}'''
        a_ = tokenizer
        a_ = prefix
        # Optionally keep only the first n_obs examples.
        if n_obs is not None:
            a_ = self.src_lens[:n_obs]
        a_ = src_lang
        a_ = tgt_lang

    def __len__( self) ->Optional[Any]:
        # Dataset size = number of source lines (after the n_obs truncation).
        return len(self.src_lens)

    def __getitem__( self , __UpperCAmelCase) ->Tuple:
        a_ = index + 1  # linecache starts at 1
        a_ = self.prefix + linecache.getline(str(self.src_file) , __UpperCAmelCase).rstrip("\n")
        a_ = linecache.getline(str(self.tgt_file) , __UpperCAmelCase).rstrip("\n")
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , __UpperCAmelCase):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        a_ = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , __UpperCAmelCase) else self.tokenizer
        )
        a_ = self.tokenizer.generator if isinstance(self.tokenizer , __UpperCAmelCase) else self.tokenizer
        a_ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_source_length , "right")
        a_ = encode_line(__UpperCAmelCase , __UpperCAmelCase , self.max_target_length , "right")
        a_ = source_inputs["input_ids"].squeeze()
        a_ = target_inputs["input_ids"].squeeze()
        a_ = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def UpperCAmelCase__ ( __UpperCAmelCase) ->Optional[Any]:
        # Character length of every line in the file (used by the min() check above).
        return [len(__UpperCAmelCase) for x in Path(__UpperCAmelCase).open().readlines()]

    def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[Any]:
        # Collate function: stack the examples, then trim away all-padding columns.
        a_ = torch.stack([x["input_ids"] for x in batch])
        a_ = torch.stack([x["attention_mask"] for x in batch])
        a_ = torch.stack([x["decoder_input_ids"] for x in batch])
        a_ = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , __UpperCAmelCase)
            else self.tokenizer.pad_token_id
        )
        a_ = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , __UpperCAmelCase)
            else self.tokenizer.pad_token_id
        )
        a_ = trim_batch(__UpperCAmelCase , __UpperCAmelCase)
        a_ , a_ = trim_batch(__UpperCAmelCase , __UpperCAmelCase , attention_mask=__UpperCAmelCase)
        a_ = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
UpperCamelCase_ = getLogger(__name__)
def UpperCamelCase ( nested_list ):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    Bug fix: the body referenced `__lowercase`, which was not the parameter
    name, so any call raised NameError.
    """
    return list(itertools.chain.from_iterable(nested_list ) )
def UpperCamelCase ( folder_path ) ->None:
    """Write repository metadata (sha, branch, hostname) to <folder_path>/git_log.json.

    Bug fix: the result of get_git_info() was bound to the throwaway `a_` and
    the undefined `__lowercase` was passed on; coherent names restored.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def UpperCamelCase ( content , path , indent=4 , **json_dump_kwargs ) ->None:
    """Serialize `content` as JSON to `path`.

    Bug fix: three positional parameters shared the name `UpperCAmelCase`
    (a SyntaxError) and the body used the undefined `__lowercase`.
    """
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def UpperCamelCase ( path ):
    """Load and return the JSON document stored at `path`.

    Bug fix: the body used the undefined `__lowercase` instead of the parameter.
    """
    with open(path ) as f:
        return json.load(f )
def UpperCamelCase ( ) ->dict:
    """Collect git metadata for the enclosing repository.

    Bug fix: locals were bound to `a_` and `str(__lowercase)` stringified an
    undefined name; the repo object and dict are now named coherently.
    Requires the third-party `git` (GitPython) module imported at file top.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def UpperCamelCase ( f , x ) ->list:
    """list(map(f, x)) — map eagerly to a list.

    Bug fix: duplicate parameter names (SyntaxError) and an undefined
    `__lowercase` in the body.
    """
    return list(map(f , x ) )
def UpperCamelCase ( obj , path ):
    """Pickle `obj` to the file at `path`.

    Bug fix: duplicate parameter names (SyntaxError) and an undefined
    `__lowercase` in the body.
    """
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def UpperCamelCase ( s ) ->str:
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style).

    Bug fix: the inner helpers received parameters they never used and
    referenced undefined names (`__lowercase`, `exclude`); restored so each
    helper operates on its argument.
    """

    def remove_articles(text ):
        return re.sub(r"\b(a|an|the)\b" , " " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )


normalize_answer = UpperCamelCase  # name used by the EM / F1 helpers below
def UpperCamelCase ( prediction , ground_truth ):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    Bug fix: duplicate parameter names (SyntaxError) and `a_`/`__lowercase`
    placeholders left every later reference unbound; coherent names restored.
    Returns 0 when there is no token overlap (avoids division by zero).
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def UpperCamelCase ( prediction , ground_truth ) ->bool:
    """True iff prediction and reference are equal after SQuAD normalization.

    Bug fix: duplicate parameter names (SyntaxError); names restored.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )


exact_match_score = UpperCamelCase  # name used by the EM aggregation below
def UpperCamelCase ( output_lns , reference_lns ) ->dict:
    """Average exact-match score over paired output/reference lines.

    Bug fix: duplicate parameter names (SyntaxError) and `em` bound to `a_`;
    restored. Returns {"em": 0} for empty input.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def UpperCamelCase ( model_prefix ) ->bool:
    """True iff the model prefix names a RAG model.

    Bug fix: the body read `model_prefix` while the parameter was named
    `UpperCAmelCase` (NameError on every call); the parameter is renamed to match.
    """
    return model_prefix.startswith("rag" )
def UpperCamelCase ( extra_params , hparams , config ):
    """Move each param in `extra_params` from `hparams` onto `config`.

    Uses an equivalence table so e.g. `dropout` maps to T5's `dropout_rate`.
    Params the config cannot accept are logged and dropped from hparams.

    Bug fix: three parameters shared one name (SyntaxError), the
    dropout->dropout_rate mapping was assigned to a throwaway `a_`, and every
    getattr/setattr used the undefined `__lowercase`; all restored.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( arr : Sequence[float] , low : int , high : int ) -> tuple[int | None, int | None, float]:
    """Maximum-subarray (divide and conquer) over arr[low..high], inclusive.

    Returns (start_index, end_index, max_sum); (None, None, 0) for empty input.

    Bug fix: all three parameters were named `__lowercase` (a SyntaxError) and
    the recursive results were bound to throwaway `_snake_case` names; the
    names used by the comparisons below are restored.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr , low , mid )
    right_low, right_high, right_sum = max_subarray(arr , mid + 1 , high )
    cross_left, cross_right, cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


max_subarray = a_  # public name used recursively and by time_max_subarray
def a_ ( arr : Sequence[float] , low : int , mid : int , high : int ) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint of arr[low..high].

    Bug fix: all four parameters were named `__lowercase` (a SyntaxError) and
    the accumulators were bound to `_snake_case`; restored.
    """
    left_sum, max_left = float('-inf' ), -1
    right_sum, max_right = float('-inf' ), -1
    # Extend leftward from mid, keeping the best prefix sum.
    summ = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    # Extend rightward from mid + 1 symmetrically.
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


max_cross_sum = a_  # public name used by max_subarray
def a_ ( input_size : int ) -> float:
    """Time max_subarray on a random array of `input_size` ints in [1, input_size].

    Bug fix: the body mixed the placeholder `__lowercase` with `input_size`
    (only one of which existed) and discarded the start/end timestamps.
    """
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start


time_max_subarray = a_  # public name used by the benchmark plot below
def a_ ( ) -> None:
    """Benchmark max_subarray over growing input sizes, print and plot runtimes.

    Bug fix: the sizes/runtimes lists were bound to `_snake_case` and the list
    comprehension called time_max_subarray on the undefined `__lowercase`;
    restored. Requires matplotlib (imported at file top as plt).
    """
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken' )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , '\t\t' , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel('Number of Inputs' )
    plt.ylabel('Time taken in seconds' )
    plt.show()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCAmelCase ( lines ):
    """Hash a list of source lines, ignoring comments and empty lines.

    Bug fix: the parameter was named `a_` while the body iterated the undefined
    `__lowercase`, and each intermediate was bound to a throwaway name; the
    filtered text is now actually accumulated and hashed.
    """
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = "\n".join(filtered_lines )

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8" )
    return shaaaa(full_bytes ).hexdigest()


# Name used by the packaged-modules hash table below.
_hash_python_lines = __UpperCAmelCase
# get importable module names and hash for caching
# NOTE(review): both tables below are bound to the placeholder `__a`, while the
# update calls further down read `_EXTENSION_TO_MODULE`, `_MODULE_TO_EXTENSIONS`
# and `_hash_python_lines` — placeholder-rename damage; the original names must
# be restored for this module to import.
__a = {
    '''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    '''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    '''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    '''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    '''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    '''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    '''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    '''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__a = {
    '''.csv''': ('''csv''', {}),
    '''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
    '''.json''': ('''json''', {}),
    '''.jsonl''': ('''json''', {}),
    '''.parquet''': ('''parquet''', {}),
    '''.arrow''': ('''arrow''', {}),
    '''.txt''': ('''text''', {}),
}
# Image/audio folder loaders claim every media extension they support.
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Modules whose data files may additionally arrive as metadata-bearing archives.
__a = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
__a = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark over tiny hub checkpoints.

    NOTE(review): this block shows placeholder-rename damage — every test
    method is named `A` (each definition overwrites the previous, so only the
    last survives), every local is `_snake_case`, and `lowercase` is used as a
    value but never defined in this file. The original method names and the
    True/False flag values must be restored before these tests can run; only
    documentation is added here.
    """

    def A ( self : List[Any] , lowercase : Dict ):
        '''Assert every (batch_size, sequence_length) entry in a benchmark result dict is populated.'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                _snake_case = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(lowercase )

    def A ( self : str ):
        '''Benchmark tiny GPT-2 and check the inference time/memory results.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Any ):
        '''Benchmark a tiny classifier with only_pretrain_model enabled.'''
        _snake_case = 'sgugger/tiny-distilbert-classification'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Optional[int] ):
        '''Benchmark tiny GPT-2 in torchscript mode.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def A ( self : Optional[Any] ):
        '''Benchmark tiny GPT-2 inference in fp16 (GPU only).'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : str ):
        '''Benchmark with an explicit config whose `architectures` is set to None.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        # set architectures equal to `None`
        _snake_case = None
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Optional[Any] ):
        '''Benchmark tiny GPT-2 training and check the train time/memory results.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def A ( self : str ):
        '''Benchmark tiny GPT-2 training in fp16 (GPU only).'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Tuple ):
        '''Benchmark tiny GPT-2 inference with a preloaded config object.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Union[str, Any] ):
        '''Benchmark a tiny BART inference with a preloaded config (encoder-decoder path).'''
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Dict ):
        '''Benchmark tiny GPT-2 training with a preloaded config.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Dict ):
        '''Benchmark a tiny BART training with a preloaded config.'''
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Optional[Any] ):
        '''Benchmark with CSV output enabled and check every CSV file is written.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )

    def A ( self : Union[str, Any] ):
        '''Benchmark with line-by-line memory tracing and check the summaries exist.'''
        _snake_case = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(lowercase : Optional[Any] ):
            self.assertTrue(hasattr(lowercase , 'sequential' ) )
            self.assertTrue(hasattr(lowercase , 'cumulative' ) )
            self.assertTrue(hasattr(lowercase , 'current' ) )
            self.assertTrue(hasattr(lowercase , 'total' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            _snake_case = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() )
"""simple docstring"""
import random
def __magic_name__ ( data : list , pivot ) -> tuple:
    """Three-way partition of `data` around `pivot`: (less, equal, greater).

    Bug fix: both parameters were named `__snake_case` (a SyntaxError), the
    three buckets were bound to a single throwaway name, and the appends used
    the undefined `__lowercase` instead of `element`; all restored.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater


_partition = __magic_name__  # name used by quick_select below
def __magic_name__ ( items : list , index : int ):
    """Return the element that would sit at `items[index]` if `items` were sorted.

    Bug fix: both parameters were named `__snake_case` (a SyntaxError) and the
    partition results were bound to a single placeholder; names restored so the
    `m`/`count` comparisons below are meaningful. Returns None for an
    out-of-range index.
    """
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items ) or index < 0:
        return None

    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    less, equal, greater = _partition(items , pivot )
    count = len(equal )
    m = len(less )

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less , index )
    # must be in larger
    else:
        return quick_select(greater , index - (m + count) )


quick_select = __magic_name__  # public name used for the recursive calls
| 202 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
    """Dense matrix of numbers with operator overloading and a Sherman-Morrison
    rank-1 inverse update.

    Bug fixes restored from the placeholder-renamed source: the three methods
    originally all named `A` shadowed one another (only the last survived),
    internal constructor calls referenced an undefined `Matrix`, and locals
    were bound to `_snake_case`. The in-file call sites use
    `validate_indicies`, `transpose` and `sherman_morrison`, restored below;
    a module-level `Matrix` alias is added for the demo code at file bottom.
    """

    def __init__( self , row : int , column : int , default_value : float = 0 ):
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]

    def __str__( self ):
        """Pretty-print with every element right-aligned to the widest one."""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line

        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s

    def __repr__( self ):
        return str(self )

    def validate_indicies( self , loc : tuple[int, int] ):
        """True iff `loc` is a 2-sequence of indices inside the matrix bounds."""
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__( self , loc : tuple[int, int] ):
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]

    def __setitem__( self , loc : tuple[int, int] , value : float ):
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value

    def __add__( self , another : Matrix ):
        """Element-wise sum; shapes must match."""
        assert isinstance(another , SCREAMING_SNAKE_CASE__ )
        assert self.row == another.row and self.column == another.column
        # Add
        result = SCREAMING_SNAKE_CASE__(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__( self ):
        result = SCREAMING_SNAKE_CASE__(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result

    def __sub__( self , another : Matrix ):
        return self + (-another)

    def __mul__( self , another : int | float | Matrix ):
        """Scalar multiplication or matrix product, depending on the operand type."""
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = SCREAMING_SNAKE_CASE__(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , SCREAMING_SNAKE_CASE__ ):  # Matrix multiplication
            assert self.column == another.row
            result = SCREAMING_SNAKE_CASE__(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )

    def transpose( self ):
        result = SCREAMING_SNAKE_CASE__(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison( self , u : Matrix , v : Matrix ):
        """Given self = A^-1, return (A + u v^T)^-1 via the Sherman-Morrison formula.

        Returns None when the update is singular (1 + v^T A^-1 u == 0).
        """
        assert isinstance(u , SCREAMING_SNAKE_CASE__ ) and isinstance(v , SCREAMING_SNAKE_CASE__ )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Conventional name used by the demo code at the bottom of this file.
Matrix = SCREAMING_SNAKE_CASE__
# Testing
# NOTE(review): this demo block is placeholder-damaged: both demo functions are
# named `a_` (the second shadows the first), every local is `_snake_case` so
# `ainv`/`u`/`v` are unbound, `Matrix` and `__lowercase` are undefined, and
# `testa()` does not exist. Restore the original names before running.
if __name__ == "__main__":

    def a_ ( ) -> None:
        # a^(-1)
        _snake_case = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            _snake_case = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        _snake_case = Matrix(3 , 1 , 0 )
        _snake_case , _snake_case , _snake_case = 1, 2, -3
        _snake_case = Matrix(3 , 1 , 0 )
        _snake_case , _snake_case , _snake_case = 4, -2, 5
        print(f'''u is {u}''' )
        print(f'''v is {v}''' )
        print(f'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )

    def a_ ( ) -> None:
        import doctest

        doctest.testmod()

    testa()
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowercase__ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def UpperCamelCase( model , model_args , output_path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    """Export a PyTorch module to ONNX, handling the pre-1.11 torch.onnx API.

    Bug fix: the original signature repeated the name `UpperCAmelCase_` eight
    times (a SyntaxError) and every keyword argument was the undefined
    `__lowercase`; names restored from the keyword slots themselves.
    NOTE(review): `do_constant_folding`/`enable_onnx_checker` are restored to
    True per the upstream converter — confirm.
    """
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )


onnx_export = UpperCamelCase  # name used by convert_models below
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False ):
    """Convert a Stable Diffusion checkpoint into per-module ONNX files on disk.

    NOTE(review): this function is placeholder-damaged beyond a safe rewrite:
    all four parameters share the name `UpperCAmelCase_` (a SyntaxError; the
    original was (model_path, output_path, opset, fp16=False)), every local is
    bound to the annotated placeholder `UpperCAmelCase`, so the names read
    below (`pipeline`, `output_path`, `unet_path`, ...) are unbound, and
    `torch.floataa` appears on both branches of the dtype selection (originally
    float16 / float32). Only documentation is added here.
    """
    UpperCAmelCase : Union[str, Any] = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        UpperCAmelCase : int = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        UpperCAmelCase : Tuple = 'cpu'
    UpperCAmelCase : int = StableDiffusionPipeline.from_pretrained(__lowercase , torch_dtype=__lowercase ).to(__lowercase )
    UpperCAmelCase : int = Path(__lowercase )

    # TEXT ENCODER
    UpperCAmelCase : Dict = pipeline.text_encoder.config.max_position_embeddings
    UpperCAmelCase : Optional[int] = pipeline.text_encoder.config.hidden_size
    UpperCAmelCase : str = pipeline.tokenizer(
        'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=__lowercase , return_tensors='pt' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__lowercase , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
            'input_ids': {0: 'batch', 1: 'sequence'},
        } , opset=__lowercase , )
    del pipeline.text_encoder

    # UNET
    UpperCAmelCase : Union[str, Any] = pipeline.unet.config.in_channels
    UpperCAmelCase : List[str] = pipeline.unet.config.sample_size
    UpperCAmelCase : Dict = output_path / 'unet' / 'model.onnx'
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , __lowercase , __lowercase , __lowercase ).to(device=__lowercase , dtype=__lowercase ),
            torch.randn(2 ).to(device=__lowercase , dtype=__lowercase ),
            torch.randn(2 , __lowercase , __lowercase ).to(device=__lowercase , dtype=__lowercase ),
            False,
        ) , output_path=__lowercase , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
            'timestep': {0: 'batch'},
            'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
        } , opset=__lowercase , use_external_data_format=__lowercase , )
    UpperCAmelCase : Dict = str(unet_path.absolute().as_posix() )
    UpperCAmelCase : Any = os.path.dirname(__lowercase )
    UpperCAmelCase : int = onnx.load(__lowercase )
    # clean up existing tensor files
    shutil.rmtree(__lowercase )
    os.mkdir(__lowercase )
    # collate external tensor files into one
    onnx.save_model(
        __lowercase , __lowercase , save_as_external_data=__lowercase , all_tensors_to_one_file=__lowercase , location='weights.pb' , convert_attribute=__lowercase , )
    del pipeline.unet

    # VAE ENCODER
    UpperCAmelCase : List[Any] = pipeline.vae
    UpperCAmelCase : Optional[Any] = vae_encoder.config.in_channels
    UpperCAmelCase : Optional[Any] = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    UpperCAmelCase : Any = lambda UpperCAmelCase_ , UpperCAmelCase_ : vae_encoder.encode(__lowercase , __lowercase )[0].sample()
    onnx_export(
        __lowercase , model_args=(
            torch.randn(1 , __lowercase , __lowercase , __lowercase ).to(device=__lowercase , dtype=__lowercase ),
            False,
        ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
            'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=__lowercase , )

    # VAE DECODER
    UpperCAmelCase : List[Any] = pipeline.vae
    UpperCAmelCase : Optional[int] = vae_decoder.config.latent_channels
    UpperCAmelCase : List[str] = vae_decoder.config.out_channels
    # forward only through the decoder part
    # NOTE(review): `vae_encoder.decode` is suspicious here — this section names
    # its module `vae_decoder` and the comment above says decoder-only; confirm
    # the original bound `vae_decoder.decode`.
    UpperCAmelCase : Dict = vae_encoder.decode
    onnx_export(
        __lowercase , model_args=(
            torch.randn(1 , __lowercase , __lowercase , __lowercase ).to(device=__lowercase , dtype=__lowercase ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=__lowercase , )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        UpperCAmelCase : int = pipeline.safety_checker
        UpperCAmelCase : Any = safety_checker.config.vision_config.num_channels
        UpperCAmelCase : Union[str, Any] = safety_checker.config.vision_config.image_size
        UpperCAmelCase : str = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , __lowercase , __lowercase , __lowercase , ).to(device=__lowercase , dtype=__lowercase ),
                torch.randn(1 , __lowercase , __lowercase , __lowercase ).to(device=__lowercase , dtype=__lowercase ),
            ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
                'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
                'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
            } , opset=__lowercase , )
        del pipeline.safety_checker
        UpperCAmelCase : str = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
        UpperCAmelCase : str = pipeline.feature_extractor
    else:
        UpperCAmelCase : Dict = None
        UpperCAmelCase : Union[str, Any] = None

    UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=__lowercase , feature_extractor=__lowercase , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(__lowercase )
    print('ONNX pipeline saved to' , __lowercase )

    del pipeline
    del onnx_pipeline
    # Reload to prove the exported pipeline is loadable.
    UpperCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(__lowercase , provider='CPUExecutionProvider' )
    print('ONNX pipeline is loadable' )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args are bound to the placeholder
    # `lowercase__` but used below as `parser`/`args` — restore the names.
    # `args.fpaa` likewise looks like a garbled `args.fp16`.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    lowercase__ = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 151 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ChineseCLIPImageProcessor):
    """Deprecated alias of `ChineseCLIPImageProcessor` kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn once at construction time, then defer entirely to the image processor.
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a regression model, its DDP-prepared copy, and a prepared dataloader.

    Returns:
        (model, ddp_model, dataloader) — `model` stays unwrapped for baseline runs.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Return a DataLoader over the tokenized GLUE/MRPC validation split.

    Args:
        use_longest: pad each batch to its longest sequence instead of max_length=128.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Only the main process writes the tokenization cache; others wait and reuse it.
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Create an Accelerator plus baseline ("no") and distributed ("ddp") model/dataloader pairs."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gathering logits/targets across processes.

    Returns:
        (logits, targets) — two concatenated tensors covering the whole dataset.
    """
    logits_and_targets = []
    for batch in dataloader:
        # Each batch is a mapping whose two values are (input, target), in order.
        inp, target = batch.values()
        with torch.no_grad():
            logit = model(inp)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics returns exactly `num_samples` predictions (no pad duplicates)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare single-process MRPC metrics against distributed gather_for_metrics results."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    # Both runs must agree on every reported metric.
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Run the gather_for_metrics test matrix over split/dispatch batch configurations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point for xla_spawn (TPUs); the launcher passes the process index."""
    main()
if __name__ == "__main__":
    # Direct (non-TPU) launch path.
    main()
def a_(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form.

    E.g. "AC" -> 10101100. A leading "-" is preserved on the result.

    Raises:
        ValueError: if the input is empty/whitespace or not valid hexadecimal.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:
        # The loop never runs for 0, so bin_str stays "" and int("") would raise.
        return 0
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase_(BaseImageProcessor):
    r"""
    Image processor implementing a LeViT-style pipeline: shortest-edge resize,
    center crop, rescale to [0, 1], and ImageNet-default normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; a "shortest_edge" size is scaled by 256/224 before resizing."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        rescale_factor: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``rescale_factor`` (typically 1/255)."""
        return rescale(image, scale=rescale_factor, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        image_mean: Union[float, List[float]],
        image_std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with per-channel mean and standard deviation."""
        return normalize(image, mean=image_mean, std=image_std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more images into model-ready ``pixel_values``.

        Per-call arguments override the instance defaults configured in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 31 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    r"""
    Configuration for a Longformer model. Stores the sliding attention window
    size plus the standard transformer hyperparameters.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Flag toggled by the ONNX exporter to switch to export-friendly attention.
        self.onnx_export = onnx_export
class SCREAMING_SNAKE_CASE__(OnnxConfig):
    """ONNX export configuration for Longformer (adds the global attention mask input)."""

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Tell the model to use the export-compatible attention implementation.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Extend the base outputs with the pooler output for the default task."""
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs and add a global attention mask on every second token."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
def solution(_lowerCAmelCase: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 strictly below ``_lowerCAmelCase``.

    Renamed from the mangled ``__snake_case`` to match the call site in the
    ``__main__`` guard. The original ``elif a % 15 == 0: result -= a`` branch
    was unreachable dead code (multiples of 15 already satisfy ``a % 3 == 0``)
    and has been removed without changing the result.
    """
    return sum(a for a in range(3, _lowerCAmelCase) if a % 3 == 0 or a % 5 == 0)
if __name__ == "__main__":
    # Show the default result when run as a script.
    print(F'''{solution() = }''')
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ModelMixin):
    r"""
    Wrapper that runs several `ControlNetModel`s on the same sample and merges
    their residuals (Multi-ControlNet).
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every controlnet on its own conditioning image/scale and sum the residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """Save each controlnet under `save_directory`, `save_directory_1`, `save_directory_1_2`, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load controlnets from `pretrained_model_path`, `pretrained_model_path_1`, ... until a directory is missing."""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class lowerCAmelCase__(BaseOutput):
    """
    Output of a Stable Diffusion pipeline: generated images plus per-image NSFW flags.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker | 96 |
class SCREAMING_SNAKE_CASE__:
    """Prefix-sum helper: O(1) range sums and subarray-sum lookups over a fixed array."""

    def __init__(self, array: list) -> None:
        len_array = len(array)
        # prefix_sum[i] holds the sum of array[0..i] inclusive.
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            # Record this prefix sum (not the target) for later differences.
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
class lowerCAmelCase_:
    """Prefix-sum helper: O(1) range sums and subarray-sum lookups over a fixed array."""

    def __init__(self, array: list) -> None:
        len_array = len(array)
        # prefix_sum[i] holds the sum of array[0..i] inclusive.
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            # Record this prefix sum (not the target) for later differences.
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__(nn.Module):
    r"""
    Dual transformer: two `TransformeraDModel`s, each attending to its own slice
    of the conditioning tokens, with their residuals mixed by `mix_ratio`.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode each condition slice with its transformer and mix the residuals."""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCAmelCase : Optional[int] = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """PyTorch-Lightning module that fine-tunes a transformer for token classification (NER).

    NOTE(review): the original block was identifier-mangled: the class was named
    ``__snake_case`` with undefined base ``__lowerCamelCase`` (restored to
    ``BaseTransformer``, which ``add_model_specific_args`` already delegates to),
    every method shared the single name ``UpperCAmelCase__`` (so later defs
    clobbered earlier ones), and two defs repeated the parameter name ``A``
    (a SyntaxError).  Names are restored from identifiers the bodies themselves
    read (``hparams``, ``self.mode``, ``NERTransformer`` in ``__main__``) and
    from the Lightning hook contract implied by ``generic_train``.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        """Build the task object, label list and base model.

        `hparams` may be a plain dict (e.g. when reloading from a checkpoint)
        or an argparse Namespace.
        """
        if isinstance(hparams, dict):
            hparams = Namespace(**hparams)
        # The concrete TokenClassificationTask subclass (e.g. NER, POS) lives in `tasks.py`.
        module = import_module("""tasks""")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # CrossEntropyLoss().ignore_index marks label positions (padding/subwords) to skip.
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """Compute the training loss for one batch."""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Convert the raw dataset files into cached feature tensors for each split."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("""Creating features from dataset file at %s""", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    # XLNet puts the CLS token at the end and uses segment id 2 for it.
                    cls_token_at_end=bool(self.config.model_type in ["""xlnet"""]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["""xlnet"""]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("""Saving features into cached file %s""", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False):
        """Load the cached features for `mode` ("train"/"dev"/"test") into a DataLoader.

        NOTE(review): the original signature repeated the parameter name ``A``
        three times (SyntaxError); the names here follow what the body reads
        (`self._feature_file(mode)`, `batch_size=`).  `shuffle` is currently unused —
        TODO confirm whether shuffling should be forwarded to the DataLoader.
        """
        cached_features_file = self._feature_file(mode)
        logger.info("""Loading features from cached file %s""", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and collect predictions/targets for one batch."""
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate per-batch outputs into seqeval metrics (shared by val and test)."""
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs]).mean()
        preds = np.concatenate([x["""pred"""] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["""target"""] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # Skip positions labelled with the CrossEntropyLoss ignore index.
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        # NOTE(review): `fa_score` is the name this file imports from seqeval
        # (presumably its f1_score) — kept so this block works with the file's imports.
        results = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(out_label_list, preds_list),
            """precision""": precision_score(out_label_list, preds_list),
            """recall""": recall_score(out_label_list, preds_list),
            """f1""": fa_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add NER-specific CLI options on top of the BaseTransformer ones.

        NOTE(review): the original signature repeated the parameter name ``A``
        twice (SyntaxError); the delegation to ``BaseTransformer`` fixes the
        intended parameters.
        """
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            """--task_type""", default="""NER""", type=str, help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""",
            default=128,
            type=int,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ),
        )
        parser.add_argument(
            """--labels""", default="""""", type=str, help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""", )
        parser.add_argument(
            """--gpus""", default=0, type=int, help="""The number of GPUs allocated for this, it is by default 0 meaning none""", )
        parser.add_argument(
            """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""" )
        return parser


if __name__ == "__main__":
    # NOTE(review): the original script bound every value to `__UpperCAmelCase`
    # while reading `parser`/`args`/`model`/`trainer`/`checkpoints` (all undefined);
    # the data flow below is restored from those reads.
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 111 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration tests for BetterTransformer conversion on a tiny T5 model.

    NOTE(review): in the original block every local was bound to `_snake_case`
    and read back through the undefined name `lowercase` (NameError at runtime),
    and both methods were named `A` — the second clobbered the first and neither
    started with `test`, so unittest never ran them.  The data flow is restored
    from the statement order.
    """

    def test_transform_and_reverse(self):
        """Convert, generate, revert, save, reload — outputs must match."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inp = tokenizer('This is me', return_tensors='pt')
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
        self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving a converted (non-reversed) model must raise; after reversing it must succeed."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # The original asserted `assertRaises(lowercase)` (undefined); upstream
            # save_pretrained raises ValueError for a still-converted model — TODO confirm.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A: Tuple = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
A: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
# NOTE(review): this script is identifier-mangled — every assignment below binds
# the same name `_lowerCamelCase`, while later statements read the intended names
# (`api`, `results`, `models`, `local_checkpoint`, `model`, `noise`, `time_step`,
# `logits`), which are therefore undefined at runtime.  The mapping of each
# expected-output tensor to its `results[...]` key is not recoverable from this
# file, so the code is left byte-identical and only annotated.
# Intended: `api = HfApi()` — Hub client used to enumerate diffusers models.
_lowerCamelCase : List[Any] = HfApi()
# Intended: `results = {}` — maps "<org>_<model_name>" keys (see the join at the
# bottom) to the expected first-30-logits slices below.
_lowerCamelCase : Dict = {}
# fmt: off
# Each tensor is one checkpoint's expected `model(noise, time_step).sample[0, 0, 0, :30]`.
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# Intended: `models = api.list_models(filter="diffusers")`.
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
    # Only Google-authored checkpoints plus the CompVis LDM celebahq model are checked.
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Local path where the checkpoint was downloaded ahead of time.
        _lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(F'Started running {mod.modelId}!!!')
        if mod.modelId.startswith('''CompVis'''):
            # CompVis repos store the UNet in a `unet` subfolder.
            _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            _lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the sampled noise — and therefore the logits — are reproducible.
        torch.manual_seed(0)
        random.seed(0)
        _lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        _lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            _lowerCamelCase : int = model(noise, time_step).sample
        # Compare the first 30 logits of the first row against the stored reference;
        # the key is the modelId with "/" and "-" normalized to "_".
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(F'{mod.modelId} has passed successfully!!!')
"""simple docstring"""
from jiwer import compute_measures
import datasets
UpperCamelCase_ = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
UpperCamelCase_ = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
UpperCamelCase_ = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Word Error Rate (WER) metric backed by `jiwer.compute_measures`.

    NOTE(review): in the original block both methods were named
    ``UpperCAmelCase__`` (the second definition clobbered the first, and
    ``datasets.Metric`` dispatches to ``_info``/``_compute``), and ``_compute``
    declared three parameters all named ``__UpperCAmelCase`` — a SyntaxError.
    Names are restored from the ``datasets.Metric`` API and the
    "predictions"/"references" feature keys declared in ``_info``.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence"),
                    "references": datasets.Value("string" , id="sequence"),
                }) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ] , )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER of `predictions` against `references`.

        With `concatenate_texts=True` jiwer aligns the full texts in one call;
        otherwise error counts are accumulated pair by pair.
        """
        if concatenate_texts:
            # jiwer's signature is compute_measures(truth, hypothesis).
            return compute_measures(references , predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references):
                measures = compute_measures(reference , prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                # The denominator is the reference length: S + D + C (hits).
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config checks specific to MobileNetV2.

    NOTE(review): restored from a mangled original whose class name
    (`SCREAMING_SNAKE_CASE__`) and base (`UpperCAmelCase`, undefined — the file
    imports `ConfigTester`) hid the class from its call site
    (`MobileNetVaConfigTester(...)` in the model test's `setUp`); the created
    config was also read back through the undefined name `lowercase`.
    """

    def create_and_test_config_common_properties(self):
        # Hook invoked by ConfigTester.run_common_tests().
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV2 configs/inputs and runs shape checks for each head.

    NOTE(review): reconstructed from a mangled original in which every parameter
    of `__init__` and of the `create_and_check_*` methods was named `lowercase`
    (a SyntaxError), and the class was unreachable under the name its call site
    (`MobileNetVaModelTester(self)` in `setUp`) uses.  Attribute and method names
    are taken from the assignments/reads in the original bodies and the call
    sites in the test class below; defaults keep the original order.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # With finegrained output the final width is kept; otherwise it is scaled
        # by the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Check logits shape both without and with labels supplied.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for MobileNetV2.

    NOTE(review): reconstructed from a mangled original: the bases were the
    undefined name `UpperCAmelCase` (the file imports `ModelTesterMixin` and
    `PipelineTesterMixin`), every class attribute was bound to `_UpperCAmelCase`
    (the mixins read `all_model_classes` / `pipeline_model_mapping` /
    `test_*` flags), all test methods were named `A` (clobbering each other and
    invisible to unittest discovery), and `check_hidden_states_output` repeated
    the parameter name `lowercase` (a SyntaxError).
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # The four boolean flags below were all bound to `_UpperCAmelCase` in the
    # original; the names follow the ModelTesterMixin contract — TODO confirm
    # the False/False/False/False assignment against the upstream test file.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # MobileNetV2 exposes 16 hidden-state stages.
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats test image used by the integration tests below.

    NOTE(review): the original named this function `a_` while both integration
    tests call `prepare_img()`, and it bound the opened image to a mangled name
    while `return image` read an undefined one.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released MobileNetV2 checkpoints.

    NOTE(review): reconstructed from a mangled original in which every local was
    bound to `_snake_case` and read through the undefined name `lowercase`; the
    property below was named `A` while both tests read
    `self.default_image_processor`, which grounds the restored name.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__a = logging.getLogger(__name__)
@dataclass(frozen=UpperCamelCase )
class A__ :
    """A single example for a sentence-pair classification task (HANS-style).

    NOTE(review): identifiers here are mangled.  All five fields share the one
    name ``UpperCamelCase_`` — later annotations override earlier ones, so the
    dataclass effectively has a single ``Optional[str]`` field; the intended
    fields are presumably guid/text_a/text_b/label/pairID — TODO confirm against
    the upstream HANS utilities.  The next two classes in this file are also
    named ``A__`` and shadow this one at module scope, and ``frozen=UpperCamelCase``
    refers to a module-level name rather than a literal ``True``.
    """

    UpperCamelCase_ : str
    UpperCamelCase_ : str
    UpperCamelCase_ : Optional[str] = None
    UpperCamelCase_ : Optional[str] = None
    UpperCamelCase_ : Optional[str] = None
@dataclass(frozen=UpperCamelCase )
class A__ :
    """
    A single set of features of data: input ids, attention mask, token type
    ids, label and pair id.

    NOTE(review): field names were collapsed by automated renaming (see the
    dataclass above); the feature-building code in this module calls
    ``InputFeatures(**inputs, label=..., pairID=...)``.
    """
    UpperCamelCase_ : List[int]
    UpperCamelCase_ : Optional[List[int]] = None
    UpperCamelCase_ : Optional[List[int]] = None
    UpperCamelCase_ : Optional[Union[int, float]] = None
    UpperCamelCase_ : Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset
    class A__ ( UpperCamelCase ):
        """
        PyTorch ``Dataset`` of HANS ``InputFeatures``, built (or loaded from a
        cache file) at construction time.

        NOTE(review): automated renaming broke this class in several ways --
        every ``__init__`` parameter shares one name (a SyntaxError), the base
        class name ``UpperCamelCase`` is undefined, and the collapsed
        assignments mean ``self.features``/``self.label_list`` are read but
        never set. Verify against the original ``hans.py`` before relying on
        any behavior documented here.
        """
        # Expected to hold the converted examples; read by __len__/__getitem__.
        UpperCamelCase_ : List[InputFeatures]
        def __init__( self : int , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : bool = False , ) -> Union[str, Any]:
            """Build (or load cached) features for the given task/split.

            NOTE(review): parameters were presumably data_dir, tokenizer, task,
            max_seq_length, overwrite_cache, evaluate -- TODO confirm.
            """
            _UpperCAmelCase : Optional[int] = hans_processors[task]()
            _UpperCAmelCase : Optional[int] = os.path.join(
                lowerCAmelCase__ , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(lowerCAmelCase__ ) , lowerCAmelCase__ , ) , )
            _UpperCAmelCase : List[Any] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                _UpperCAmelCase , _UpperCAmelCase : Any = label_list[2], label_list[1]
                _UpperCAmelCase : str = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            _UpperCAmelCase : Dict = cached_features_file + ".lock"
            with FileLock(lowerCAmelCase__ ):
                if os.path.exists(lowerCAmelCase__ ) and not overwrite_cache:
                    logger.info(F"""Loading features from cached file {cached_features_file}""" )
                    _UpperCAmelCase : Any = torch.load(lowerCAmelCase__ )
                else:
                    logger.info(F"""Creating features from dataset file at {data_dir}""" )
                    _UpperCAmelCase : Tuple = (
                        processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
                    )
                    logger.info("Training examples: %s" , len(lowerCAmelCase__ ) )
                    _UpperCAmelCase : Any = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
                    logger.info("Saving features into cached file %s" , lowerCAmelCase__ )
                    torch.save(self.features , lowerCAmelCase__ )
        def __len__( self : List[Any] ) -> Dict:
            """Number of feature records."""
            return len(self.features )
        def __getitem__( self : Optional[Any] , lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
            """Return one feature record.

            NOTE(review): the body indexes with ``i`` while the parameter was
            renamed -- the index argument is effectively unused as written.
            """
            return self.features[i]
        def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
            """Return the label list for the task."""
            return self.label_list
if is_tf_available():
    import tensorflow as tf
    class A__ :
        """
        TensorFlow-side HANS dataset: converts examples to features and wraps
        them in a ``tf.data.Dataset`` built from a generator.

        NOTE(review): as with the torch variant above, automated renaming gave
        every ``__init__`` parameter one shared name (a SyntaxError) and
        collapsed the assignments, so ``self.features``/``self.dataset``/
        ``self.label_list`` are read but never set. Verify against the
        original ``hans.py``.
        """
        # Expected to hold the converted examples; read by gen()/__len__/__getitem__.
        UpperCamelCase_ : List[InputFeatures]
        def __init__( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = 1_2_8 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : bool = False , ) -> List[Any]:
            """Build features for the task/split and wrap them as a tf.data.Dataset."""
            _UpperCAmelCase : Union[str, Any] = hans_processors[task]()
            _UpperCAmelCase : Union[str, Any] = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = label_list[2], label_list[1]
            _UpperCAmelCase : str = label_list
            _UpperCAmelCase : List[str] = processor.get_dev_examples(lowerCAmelCase__ ) if evaluate else processor.get_train_examples(lowerCAmelCase__ )
            _UpperCAmelCase : List[str] = hans_convert_examples_to_features(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            def gen():
                # Yield (inputs, label) pairs in the structure declared to from_generator below.
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
                    if ex_index % 1_0_0_0_0 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(lowerCAmelCase__ )) )
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            _UpperCAmelCase : int = tf.data.Dataset.from_generator(
                lowerCAmelCase__ , (
                    {
                        "example_id": tf.intaa,
                        "input_ids": tf.intaa,
                        "attention_mask": tf.intaa,
                        "token_type_ids": tf.intaa,
                    },
                    tf.intaa,
                ) , (
                    {
                        "example_id": tf.TensorShape([] ),
                        "input_ids": tf.TensorShape([None, None] ),
                        "attention_mask": tf.TensorShape([None, None] ),
                        "token_type_ids": tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )
        def _lowerCAmelCase ( self : str ) -> Optional[Any]:
            """Return the wrapped tf.data.Dataset."""
            return self.dataset
        def __len__( self : str ) -> Any:
            """Number of feature records."""
            return len(self.features )
        def __getitem__( self : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
            """Return one feature record.

            NOTE(review): indexes with ``i`` while the parameter was renamed --
            the index argument is effectively unused as written.
            """
            return self.features[i]
        def _lowerCAmelCase ( self : List[str] ) -> Any:
            """Return the label list for the task."""
            return self.label_list
class A__ ( DataProcessor ):
    """Processor for the HANS data set.

    NOTE(review): automated renaming had (a) replaced the base class with the
    undefined name ``UpperCamelCase`` -- restored to ``DataProcessor``, which
    this module imports and whose ``_read_tsv`` the methods use; (b) renamed
    every method to ``_lowerCAmelCase`` while the call sites in this module
    still use the original names -- restored; (c) given ``_create_examples``
    two parameters with one shared name (a SyntaxError) -- restored from the
    names the body references.
    """
    def get_train_examples( self , data_dir ):
        """Return InputExamples for the HANS training split found in *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        """Return InputExamples for the HANS evaluation split found in *data_dir*."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        """Return the list of labels for this task."""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        """Build InputExamples from parsed TSV *lines*; *set_type* tags the guid."""
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue  # skip the TSV header row
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # labels in the file may carry an "ex" prefix
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
# Alias under the name the processor registry at the bottom of this module uses.
HansProcessor = A__
def __UpperCAmelCase ( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ):
    """Convert HANS ``InputExample``s into ``InputFeatures``.

    NOTE(review): automated renaming had given all four parameters one shared
    name (a SyntaxError) and replaced several argument values with a
    placeholder; parameter names are restored from the references the body
    already made, and the tokenizer flags are restored to the conventional
    ``True`` values -- verify against the original ``hans.py``.

    Args:
        examples: examples to convert.
        label_list: task labels; their order defines the label ids.
        max_length: tokenizer truncation/padding length.
        tokenizer: tokenizer used to encode the text pairs.

    Returns:
        List of ``InputFeatures`` with ``label`` and integer ``pairID`` set.
    """
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ), desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, )
        # Unknown labels fall back to id 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs, label=label, pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
# Alias under the name the dataset classes above call.
hans_convert_examples_to_features = __UpperCAmelCase
# Number of labels per task.
# NOTE(review): automated renaming collapsed two distinct module constants
# (presumably ``hans_tasks_num_labels`` and ``hans_processors``) into ``__a``,
# so the second assignment clobbers the first; ``HansProcessor`` must resolve
# to the processor class defined above -- verify before use.
__a = {
    '''hans''': 3,
}
__a = {
    '''hans''': HansProcessor,
}
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( torch_layer , weight , bias=None ):
    """Copy one trax weight (and optional bias) into a torch layer.

    NOTE(review): automated renaming had given every parameter one shared name
    (a SyntaxError) and dropped the attribute assignments; both are restored
    from the names the body already referenced.

    Args:
        torch_layer: torch module exposing ``weight`` (and optionally ``bias``).
        weight: tensor assigned to ``torch_layer.weight``; shape must match.
        bias: optional tensor assigned to ``torch_layer.bias``.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
# Alias under the name the layer/block/model setters below call.
set_param = a_
def a_ ( weights , torch_layer , hidden_size ):
    """Load trax LSH self-attention weights (query_key, value, output dense) into a torch attention layer.

    NOTE(review): parameter names and the distinct numpy locals were collapsed
    by automated renaming (duplicate parameters are a SyntaxError); restored
    from the names the body already referenced.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
# Alias under the name the block setter below calls.
set_layer_weights_in_torch_lsh = a_
def a_ ( weights , torch_layer , hidden_size ):
    """Load trax local self-attention weights (query, key, value, output dense) into a torch attention layer.

    NOTE(review): parameter names and the distinct numpy locals were collapsed
    by automated renaming (duplicate parameters are a SyntaxError); restored
    from the names the body already referenced.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
# Alias under the name the block setter below calls.
set_layer_weights_in_torch_local = a_
def a_ ( weights , torch_block , hidden_size ):
    """Load one trax Reformer block (layer norms, attention, feed-forward) into a torch block.

    NOTE(review): parameter names and the distinct weight/bias locals were
    collapsed by automated renaming (duplicate parameters are a SyntaxError);
    restored from the structure of the original conversion script.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
# Alias under the name the model setter below calls.
set_block_weights_in_torch = a_
def a_ ( weights , torch_model , hidden_size ):
    """Load a full trax Reformer weight tree into a torch ReformerModelWithLMHead.

    NOTE(review): parameter names and distinct locals were collapsed by
    automated renaming (duplicate parameters are a SyntaxError); restored from
    the names the body already referenced.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
# Alias under the name the conversion entry point below calls.
set_model_weights_in_torch = a_
def a_ ( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """Convert a pickled trax Reformer checkpoint into a saved PyTorch state dict.

    NOTE(review): parameter names were collapsed by automated renaming
    (duplicate parameters are a SyntaxError); restored from the names the body
    already referenced.

    Args:
        trax_model_pkl_path: path to the pickled trax checkpoint.
        config_file: JSON config describing the Reformer architecture.
        pytorch_dump_path: where to save the converted PyTorch weights.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
# Alias under the name the __main__ guard below calls.
convert_trax_checkpoint_to_pytorch = a_
if __name__ == "__main__":
    # CLI entry point: convert a pickled trax Reformer checkpoint to PyTorch.
    # NOTE(review): automated renaming had collapsed ``parser``/``args`` into
    # ``_lowerCamelCase``, leaving both names undefined at their use sites;
    # restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained Reformer model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
def __magic_name__ ( __snake_case : list ) -> list:
if len(__lowercase ) <= 1:
return [tuple(__lowercase )]
lowercase : List[Any] = []
def generate(__snake_case : int , __snake_case : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __lowercase )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
lowercase , lowercase : Union[str, Any] = arr[k - 1], arr[i]
else: # k is odd
lowercase , lowercase : List[str] = arr[k - 1], arr[0]
generate(k - 1 , __lowercase )
generate(len(__lowercase ) , __lowercase )
return res
if __name__ == "__main__":
    # Read a comma-separated list of integers and print every permutation.
    # NOTE(review): automated renaming had collapsed ``user_input``/``arr``
    # into ``_A``, leaving both names undefined at their use sites; restored.
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
| 202 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( args ):
    """Extract a standalone pruned checkpoint from a fine-pruned model directory.

    Loads ``pytorch_model.bin`` from ``args.model_name_or_path``, applies the
    selected pruning method to every prunable weight, and saves the pruned
    state dict to ``args.target_model_path`` (defaulting to a sibling
    ``bertarized_*`` folder copied from the source).

    NOTE(review): automated renaming had collapsed ``args`` and the pruned
    state-dict accumulator, leaving both undefined at their use sites;
    restored. The third ``ThresholdBinarizer.apply`` argument is restored to
    ``True`` (sigmoid) per the original script -- verify.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path
    print(f'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , 'pytorch_model.bin' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''' )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch: sigmoid, rescale to [l, r], clamp to [0, 1].
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''' )
            else:
                raise ValueError('Unknown pruning method' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model , os.path.join(target_model_path , 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
# Alias under the name the __main__ guard below calls.
main = a_
if __name__ == "__main__":
    # CLI entry point for extracting a pruned checkpoint.
    # NOTE(review): automated renaming had collapsed ``parser``/``args`` into
    # ``_lowerCamelCase``, leaving both names undefined at their use sites;
    # restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    args = parser.parse_args()
    main(args)
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
# Directory of this test file; the translation example scripts live relative to it.
lowercase__ = os.path.abspath(os.path.dirname(__file__))
bindir = lowercase__  # the path f-string below reads the name ``bindir``
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main # noqa
set_seed(42)
# Model checkpoints used by the tests below.
# NOTE(review): automated renaming collapsed two distinct constants
# (a Marian en-ro student and a tiny mBART) into ``lowercase__`` -- the second
# assignment clobbers the first; verify which one the test class reads.
lowercase__ = '''sshleifer/student_marian_en_ro_6_1'''
lowercase__ = '''sshleifer/tiny-mbart'''
@require_torch
class A_ ( _snake_case ):
    """
    End-to-end tests for the seq2seq translation example: they shell out to (or
    patch ``sys.argv`` for) ``run_translation.py`` and inspect the resulting
    ``trainer_state.json`` metrics.

    NOTE(review): automated renaming severely damaged this class -- the base
    class ``_snake_case`` is undefined, most methods share the name
    ``UpperCAmelCase_`` (later defs shadow earlier ones), several signatures
    give multiple parameters one shared name (a SyntaxError), collapsed
    assignments leave names like ``logs``/``eval_metrics``/``args`` undefined
    at their use sites, and the literals ``71,806`` in the CLI templates below
    look like corrupted length placeholders (a comma breaks ``type=int``
    parsing). Verify every behavior against the original test file before
    relying on it.
    """
    def UpperCAmelCase_ ( self : str , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : Tuple=True , lowercase_ : List[str]=True , ) -> List[str]:
        # Quick smoke run of the trainer; presumably parameters were
        # distributed/extra_args_str/predict_with_generate/do_train/do_eval/
        # do_predict -- TODO confirm (duplicate parameter names as written).
        UpperCAmelCase : Optional[Any] = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=lowercase_ , num_train_epochs=1 , distributed=lowercase_ , extra_args_str=lowercase_ , predict_with_generate=lowercase_ , do_train=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , )
        UpperCAmelCase : Dict = TrainerState.load_from_json(os.path.join(lowercase_ , 'trainer_state.json' ) ).log_history
        if not do_eval:
            return
        UpperCAmelCase : Union[str, Any] = [log for log in logs if 'eval_loss' in log.keys()]
        UpperCAmelCase : Optional[Any] = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
        UpperCAmelCase : Tuple = eval_metrics[-1]
        assert isinstance(last_step_stats['eval_bleu'] , lowercase_ )
        assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
        # Single-GPU smoke test.
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
        # Multi-GPU smoke test (distributed flag collapsed to a placeholder).
        self.run_seqaseq_quick(distributed=lowercase_ )
    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self : Any ) -> int:
        self.run_seqaseq_quick(distributed=lowercase_ )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        # fairscale sharded-DDP variants (currently skipped).
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='--sharded_ddp simple' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='--sharded_ddp simple --fp16' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=lowercase_ )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def UpperCAmelCase_ ( self : int ) -> Dict:
        self.run_seqaseq_quick(
            distributed=lowercase_ , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=lowercase_ )
    @require_apex
    @require_torch_gpu
    def UpperCAmelCase_ ( self : Any ) -> Any:
        # apex fp16 backend smoke test.
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='--fp16 --fp16_backend=apex' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='--fp16 --fp16_backend=apex' )
    @parameterized.expand(['base', 'low', 'high', 'mixed'] )
    @require_torch_multi_gpu
    def UpperCAmelCase_ ( self : Tuple , lowercase_ : Any ) -> Union[str, Any]:
        # Check how many times the "Running training" info line is emitted for
        # each log_level / log_level_replica combination.
        UpperCAmelCase : Optional[int] = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        UpperCAmelCase : List[str] = experiments[experiment_id]
        UpperCAmelCase : List[Any] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        UpperCAmelCase : str = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**lowercase_ , extra_args_str=data['extra_args_str'] )
        UpperCAmelCase : List[Any] = len(re.findall(lowercase_ , cl.err ) )
        self.assertEqual(lowercase_ , data['n_matches'] )
    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        # Full training run: loss must improve and predict artifacts must exist.
        UpperCAmelCase : List[Any] = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=lowercase_ , learning_rate=3E-4 , num_train_epochs=10 , distributed=lowercase_ , )
        # Check metrics
        UpperCAmelCase : Union[str, Any] = TrainerState.load_from_json(os.path.join(lowercase_ , 'trainer_state.json' ) ).log_history
        UpperCAmelCase : Optional[Any] = [log for log in logs if 'eval_loss' in log.keys()]
        UpperCAmelCase : Any = eval_metrics[0]
        UpperCAmelCase : List[str] = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] , lowercase_ )
        # test if do_predict saves generations and metrics
        UpperCAmelCase : List[str] = os.listdir(lowercase_ )
        UpperCAmelCase : List[str] = {os.path.basename(lowercase_ ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def UpperCAmelCase_ ( self : Tuple ) -> int:
        # Compare GPU memory usage of AdamW vs 8-bit AdamW (bitsandbytes).
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(lowercase_ : str ) -> Tuple[int, float]:
            # Run one epoch with the given optimizer and return memory/loss stats.
            UpperCAmelCase : Optional[Any] = '--skip_memory_metrics 0'
            UpperCAmelCase : Tuple = self.run_trainer(
                max_len=128 , model_name=lowercase_ , learning_rate=3E-4 , num_train_epochs=1 , optim=lowercase_ , distributed=lowercase_ , extra_args_str=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , n_gpus_to_use=1 , )
            # Check metrics
            UpperCAmelCase : int = TrainerState.load_from_json(Path(lowercase_ , 'trainer_state.json' ) ).log_history
            UpperCAmelCase : Tuple = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            UpperCAmelCase : Union[str, Any] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            UpperCAmelCase : int = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        UpperCAmelCase : List[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        UpperCAmelCase : Optional[Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig
        UpperCAmelCase : Optional[int] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        UpperCAmelCase : Dict = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        UpperCAmelCase : Tuple = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            lowercase_ , lowercase_ , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
        self.assertGreater(
            lowercase_ , lowercase_ , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
        self.assertEqual(
            lowercase_ , lowercase_ , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
    def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : int , lowercase_ : float = 3E-3 , lowercase_ : str = "adafactor" , lowercase_ : bool = False , lowercase_ : str = None , lowercase_ : int = 0 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : int = None , ) -> Tuple:
        # Assemble the run_translation.py CLI and execute it, either via
        # torch.distributed.run as a subprocess or in-process with patched argv.
        # NOTE(review): duplicate parameter names as written (SyntaxError);
        # presumably eval_steps, max_len, model_name, learning_rate, optim,
        # distributed, extra_args_str, num_train_epochs and the do_* flags --
        # TODO confirm.
        UpperCAmelCase : Tuple = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
        UpperCAmelCase : Optional[int] = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length 71,806
            --max_target_length 71,806
            --do_train
            --num_train_epochs {str(lowercase_ )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(lowercase_ )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        UpperCAmelCase : Union[str, Any] = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length 71,806
            --evaluation_strategy steps
            --eval_steps {str(lowercase_ )}
        """.split()
        UpperCAmelCase : int = '\n --do_predict\n '.split()
        UpperCAmelCase : Optional[int] = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"""--optim {optim}""".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                UpperCAmelCase : Any = get_gpu_count()
            UpperCAmelCase : Any = get_torch_dist_unique_port()
            UpperCAmelCase : Union[str, Any] = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            UpperCAmelCase : Union[str, Any] = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(lowercase_ , env=self.get_env() )
        else:
            UpperCAmelCase : int = ['run_translation.py'] + args
            with patch.object(lowercase_ , 'argv' , lowercase_ ):
                main()
        return output_dir
| 151 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
    """
    Mixin for testing diffusers UNet sub-blocks: builds deterministic dummy
    inputs, instantiates ``self.block_class``, and checks output shapes/values
    and that training backprop runs. Subclasses are expected to provide
    ``block_type`` ('down'/'mid'/'up'), ``block_class`` and ``block_type``-
    specific expectations.

    NOTE(review): automated renaming collapsed every method name to ``A`` (so
    later defs shadow earlier ones and calls like ``self.get_dummy_input()``
    dangle), gave ``get_dummy_input``'s parameters one shared name (a
    SyntaxError), and collapsed local assignments so names like
    ``batch_size``/``dummy_input``/``output`` are read but never set. Verify
    against the original ``test_unet_blocks_common.py`` before relying on it.
    """
    @property
    def A ( self : List[str] ):
        """Dummy inputs with default options (presumably the ``dummy_input`` property)."""
        return self.get_dummy_input()
    @property
    def A ( self : Any ):
        """Expected output shape for the configured block type (presumably ``output_shape``)."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
        """Build deterministic dummy inputs; the flags presumably were
        include_temb / include_res_hidden_states_tuple /
        include_encoder_hidden_states / include_skip_sample -- TODO confirm
        (duplicate parameter names as written)."""
        _snake_case = 4
        _snake_case = 32
        _snake_case = (32, 32)
        # Fixed seed so the generated tensors are reproducible across runs.
        _snake_case = torch.manual_seed(0 )
        _snake_case = torch.device(lowercase )
        _snake_case = (batch_size, num_channels) + sizes
        _snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
        _snake_case = {'hidden_states': hidden_states}
        if include_temb:
            _snake_case = 128
            _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
        if include_res_hidden_states_tuple:
            # Separate seed for the residual states.
            _snake_case = torch.manual_seed(1 )
            _snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
        if include_encoder_hidden_states:
            _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
        if include_skip_sample:
            _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
        return dummy_input
    def A ( self : Any ):
        """Return (init kwargs, input kwargs) for the block under test
        (presumably ``prepare_init_args_and_inputs_for_common``)."""
        _snake_case = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            _snake_case = 32
        if self.block_type == "mid":
            # mid blocks take no out_channels argument
            init_dict.pop('out_channels' )
        _snake_case = self.dummy_input
        return init_dict, inputs_dict
    def A ( self : Dict , lowercase : Optional[int] ):
        """Forward-pass test: output shape matches ``self.output_shape`` and a
        corner slice matches the expected values passed in."""
        _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
        _snake_case = self.block_class(**lowercase )
        unet_block.to(lowercase )
        unet_block.eval()
        with torch.no_grad():
            _snake_case = unet_block(**lowercase )
        if isinstance(lowercase , lowercase ):
            _snake_case = output[0]
        self.assertEqual(output.shape , self.output_shape )
        _snake_case = output[0, -1, -3:, -3:]
        _snake_case = torch.tensor(lowercase ).to(lowercase )
        assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
    @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def A ( self : Dict ):
        """Training test: one forward pass plus MSE-loss backward must run."""
        _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
        _snake_case = self.block_class(**lowercase )
        model.to(lowercase )
        model.train()
        _snake_case = model(**lowercase )
        if isinstance(lowercase , lowercase ):
            _snake_case = output[0]
        _snake_case = torch.device(lowercase )
        _snake_case = randn_tensor(output.shape , device=lowercase )
        _snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
        loss.backward()
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
    """Unit tests for the ``Vector`` / ``Matrix`` helpers in ``.lib``.

    Each test now has a distinct ``test_*`` name so unittest actually
    discovers it (previously all methods shared one name and shadowed
    each other).
    """

    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5)), )


if __name__ == "__main__":
    unittest.main()
# Doomsday date of each month, taken mod 7 (leap vs. non-leap Januaries/Februaries differ).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def a_(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date via Conway's Doomsday algorithm.

    >>> a_(2020, 10, 24)
    'Saturday'
    >>> a_(2000, 2, 29)
    'Tuesday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Week day (0=Sunday..6=Saturday) of this year's doomsday.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Month's doomsday date mod 7. A century year (centurian == 0) is a leap
    # year only when divisible by 400, hence `(year % 400) != 0` selects the
    # non-leap table (the previous `== 0` misclassified years like 2000).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of published RoFormer checkpoints -> config URLs.
# (Previously both were bound to one reassigned throwaway name.)
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
    '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
    '''junnyu/roformer_chinese_char_small''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
    ),
    '''junnyu/roformer_chinese_char_base''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_discriminator''': (
        '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_generator''': (
        '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCamelCase_(PretrainedConfig):
    """RoFormer model configuration.

    Stores the constructor arguments as instance attributes so model code can
    read them from the config object. (The previous version discarded every
    argument into a single throwaway local.)
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # For multiple-choice tasks axis 1 is the choice dimension; otherwise
        # the inputs are plain (batch, sequence) tensors. The previous version
        # unconditionally overwrote the axes, losing the multiple-choice case.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# Disable TF32 matmuls so CPU/GPU results are bit-reproducible for this test.
# NOTE(review): restored from `... = False`; confirm this matched the original flag.
torch.backends.cuda.matmul.allow_tf32 = False


class SCREAMING_SNAKE_CASE__ (unittest.TestCase):
    """One optimizer step under DDPM and DDIM schedules must produce identical results."""

    def get_model_optimizer(self, resolution=32):
        """Build a small seeded UNet and an SGD optimizer for it."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule='linear',
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule='linear',
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # Compare the DDPM and DDIM runs (previously both arguments were the
        # same undefined name, so the assertions were meaningless).
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` occurs in the sorted list ``a_list``.

    Recursive binary search; empty input returns False.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    sequence = [int(item.strip()) for item in user_input.split(''',''')]
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    not_str = '''''' if binary_search(sequence, target) else '''not '''
    print(F'''{target} was {not_str}found in {sequence}''')
import numpy as np


def a_(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise to ``vector``."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4).

    ``data`` must be ``bytes``; after construction the hex digest is
    available in ``self.hash``.
    """

    def __init__(self, data):
        self.data = data
        # Initialize hash values: first 32 bits of the fractional parts of
        # the square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Initialize round constants: first 32 bits of the fractional parts
        # of the cube roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data):
        """Pad to a 64-byte multiple: 0x80, zeros, then bit length as big-endian u64."""
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self):
        """Run the compression function over every 64-byte block and set ``self.hash``."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # Message schedule: expand the 16 words into 64.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        """Right-rotate a 32-bit value by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase):
    """Cross-check the pure-Python SHA-256 against hashlib."""

    def test_match_hashes(self):
        import hashlib

        data = bytes('Test String', 'utf-8')
        # hashlib.sha256 is the reference implementation.
        self.assertEqual(SHAaaa(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    """CLI entry point: hash a string (-s) or a file's contents (-f) with SHA-256."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file' )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')

    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ (unittest.TestCase):
    """Integration test: FlaxXLMRobertaModel reproduces reference activations."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the ConvBert sub-package. The structure maps
# submodule name -> public names; optional backends are added only when their
# dependency is importable. (Previously every list was bound to one reassigned
# throwaway name and the _LazyModule result was discarded, so the module was
# never actually lazified.)
_import_structure = {
    '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
    '''tokenization_convbert''': ['''ConvBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_convbert'''] = [
        '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ConvBertForMaskedLM''',
        '''ConvBertForMultipleChoice''',
        '''ConvBertForQuestionAnswering''',
        '''ConvBertForSequenceClassification''',
        '''ConvBertForTokenClassification''',
        '''ConvBertLayer''',
        '''ConvBertModel''',
        '''ConvBertPreTrainedModel''',
        '''load_tf_weights_in_convbert''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
        '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFConvBertForMaskedLM''',
        '''TFConvBertForMultipleChoice''',
        '''TFConvBertForQuestionAnswering''',
        '''TFConvBertForSequenceClassification''',
        '''TFConvBertForTokenClassification''',
        '''TFConvBertLayer''',
        '''TFConvBertModel''',
        '''TFConvBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Fallback so the class attribute below still resolves without sentencepiece.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ (PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer backed by HuggingFace *tokenizers*.

    Reserves the first ``offset`` ids for the PEGASUS mask tokens
    (<mask_1>, <mask_2>) and <unk_2>..<unk_{offset-1}> filler tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list)}, but is'''
                    f''' {type(additional_special_tokens)}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Only a real sentencepiece model file lets us round-trip to the slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over ``seq`` marking special-token ids."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}''' )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a special-tokens mask for a sequence or sequence pair."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS; a pair is simply concatenated before the single EOS."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger and published BART checkpoint -> config URL map.
# (Previously both were bound to one reassigned throwaway name.)
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
    # See all BART models at https://huggingface.co/models?filter=bart
}
class __snake_case (PretrainedConfig):
    """BART model configuration.

    Stores the constructor arguments as instance attributes (the previous
    version discarded every argument into a single throwaway local).
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                """The config can simply be saved and uploaded again to be fixed.""" )
class __snake_case ( __lowerCamelCase ):
    '''ONNX export configuration for a BART-style encoder-decoder model.

    Declares the dynamic input/output axes and builds dummy inputs for the
    ``default``/``seq2seq-lm``, ``causal-lm`` and sequence-classification /
    question-answering export tasks, with optional past key/value caching.

    NOTE(review): identifiers in this block appear machine-mangled -- values
    are assigned to ``__snake_case`` locals but later read through names such
    as ``common_inputs``/``batch``/``seqlen`` that are never bound, and most
    call arguments are the placeholder ``A``.  Restore the original names
    (cf. the upstream BART ONNX config) before running this code.
    '''
    @property
    def UpperCAmelCase__ ( self : int ):
        # Mapping of input name -> {axis index: symbolic axis name};
        # layout depends on the export task and on self.use_past.
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case: Dict = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                # With a cache only one new decoder token is fed per step.
                __snake_case: Optional[Any] = {0: """batch"""}
                __snake_case: str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                __snake_case: Tuple = {0: """batch""", 1: """decoder_sequence"""}
                __snake_case: Dict = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(A , direction="""inputs""" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __snake_case: str = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                __snake_case , __snake_case: Any = self.num_layers
                for i in range(A ):
                    __snake_case: List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
                    __snake_case: List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            # Sequence-classification / QA: encoder and decoder inputs, no cache.
            __snake_case: Dict = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ] )
        return common_inputs
    @property
    def UpperCAmelCase__ ( self : Dict ):
        # Output axes: extends the base-class spec with one present.{i}.key /
        # present.{i}.value pair per layer when the cache is exported.
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case: Optional[Any] = super().outputs
        else:
            __snake_case: Union[str, Any] = super(A , self ).outputs
            if self.use_past:
                __snake_case , __snake_case: List[Any] = self.num_layers
                for i in range(A ):
                    __snake_case: Dict = {0: """batch""", 2: """past_sequence + sequence"""}
                    __snake_case: int = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs
    def UpperCAmelCase__ ( self : Optional[int] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
        '''Build dummy encoder+decoder inputs (and, when enabled, zero-filled
        past key/value tensors) for the default / seq2seq-lm export tasks.'''
        __snake_case: Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A , A , A , A , A )
        # Generate decoder inputs
        __snake_case: List[Any] = seq_length if not self.use_past else 1
        __snake_case: Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A , A , A , A , A )
        __snake_case: Tuple = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        __snake_case: Any = dict(**A , **A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            __snake_case , __snake_case: Dict = common_inputs["""input_ids"""].shape
            __snake_case: Dict = common_inputs["""decoder_input_ids"""].shape[1]
            __snake_case , __snake_case: int = self.num_attention_heads
            # Per-layer cache tensor shape for the encoder side.
            __snake_case: Dict = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __snake_case: int = decoder_seq_length + 3
            # Per-layer cache tensor shape for the decoder side (padded length).
            __snake_case: Tuple = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            # Extend the decoder mask so it also covers the past positions.
            __snake_case: Optional[Any] = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(A , A )] , dim=1 )
            __snake_case: Optional[Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __snake_case , __snake_case: str = self.num_layers
            __snake_case: str = min(A , A )
            __snake_case: Any = max(A , A ) - min_num_layers
            __snake_case: List[Any] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
            # Shared layers get a 4-tuple: decoder self-attn + cross-attn caches.
            for _ in range(A ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(A ),
                        torch.zeros(A ),
                        torch.zeros(A ),
                        torch.zeros(A ),
                    ) )
            # TODO: test this.
            __snake_case: Optional[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            # Remaining (unpaired) layers only get a key/value 2-tuple.
            for _ in range(A , A ):
                common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) )
        return common_inputs
    def UpperCAmelCase__ ( self : List[str] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
        '''Build dummy decoder-only inputs (plus past key/values) for the
        causal-lm export task.'''
        __snake_case: Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A , A , A , A , A )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            __snake_case , __snake_case: Tuple = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            __snake_case: Union[str, Any] = seqlen + 2
            __snake_case , __snake_case: Dict = self.num_layers
            __snake_case , __snake_case: Dict = self.num_attention_heads
            __snake_case: Optional[Any] = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # Keep the mask dtype so concatenation with torch.ones() is valid.
            __snake_case: Optional[Any] = common_inputs["""attention_mask"""].dtype
            __snake_case: int = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 )
            __snake_case: List[str] = [
                (torch.zeros(A ), torch.zeros(A )) for _ in range(A )
            ]
        return common_inputs
    def UpperCAmelCase__ ( self : List[str] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
        '''Tokenize a fixed dummy sentence at an effective (batch, seq) size to
        produce plain classification / question-answering inputs.'''
        __snake_case: Optional[Any] = compute_effective_axis_dimension(
            A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __snake_case: List[Any] = tokenizer.num_special_tokens_to_add(A )
        __snake_case: int = compute_effective_axis_dimension(
            A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
        # Generate dummy inputs according to compute batch and sequence
        __snake_case: Union[str, Any] = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        __snake_case: Union[str, Any] = dict(tokenizer(A , return_tensors=A ) )
        return common_inputs
    def UpperCAmelCase__ ( self : List[Any] , A : PreTrainedTokenizer , A : int = -1 , A : int = -1 , A : bool = False , A : Optional[TensorType] = None , ):
        '''Dispatch dummy-input generation to the task-specific helper above.'''
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case: List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                A , batch_size=A , seq_length=A , is_pair=A , framework=A )
        elif self.task == "causal-lm":
            __snake_case: str = self._generate_dummy_inputs_for_causal_lm(
                A , batch_size=A , seq_length=A , is_pair=A , framework=A )
        else:
            __snake_case: str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                A , batch_size=A , seq_length=A , is_pair=A , framework=A )
        return common_inputs
    def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : Tuple , A : int , A : int ):
        '''Flatten past_key_values: seq2seq tasks use the with-past layout,
        other tasks fall back to the default flattening.'''
        if self.task in ["default", "seq2seq-lm"]:
            __snake_case: Any = super()._flatten_past_key_values_(A , A , A , A )
        else:
            __snake_case: List[Any] = super(A , self )._flatten_past_key_values_(
                A , A , A , A )
# --- (non-code residue from dataset join removed: "| 111 |") ---
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``.

    ``poly`` holds the coefficients in ascending-power order, i.e.
    ``poly[i]`` multiplies ``x**i``.  Direct term-by-term evaluation.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule.

    Uses one multiply and one add per coefficient instead of computing
    each power of ``x`` separately.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


# Backward-compatible alias: the mangled source exposed both functions under
# the name ``a_``; the second definition (Horner's rule) shadowed the first.
a_ = horner

if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Project Euler 117: count the tilings of a row of ``length`` unit
    squares using black unit squares and coloured tiles of length 2, 3 or 4.

    ``ways_number[n]`` is the number of tilings of a row of length ``n``:
    each row length adds, for every tile size and placement, the count of
    tilings of the prefix that precedes the tile.

    Raises:
        ValueError: if ``length`` is negative.
    """
    if length < 0:
        raise ValueError("length must be non-negative")
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


# Alias kept for callers that used the mangled name from the broken source.
_snake_case = solution

if __name__ == "__main__":
    print(f"""{solution() = }""")
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
    '''Test harness that builds a small random LiLT config plus input tensors
    and checks the output shapes of the LiLT model heads.

    NOTE(review): identifiers look machine-mangled -- ``__init__`` assigns
    every argument to a throwaway local ``_snake_case`` instead of attributes
    on ``self``, while later methods read ``self.batch_size`` etc.; restore
    the original ``self.<name> = <name>`` assignments before running.
    '''
    def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
        '''Store the (tiny) model hyper-parameters used by the shape tests.'''
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_input_mask
        _snake_case = use_token_type_ids
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = type_vocab_size
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = num_labels
        _snake_case = scope
        _snake_case = range_bbox
    def A ( self : List[Any] ):
        '''Create a config and random input ids / bounding boxes / masks /
        labels; bbox coordinates are swapped where needed so x1<=x2, y1<=y2.'''
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _snake_case = bbox[i, j, 3]
                    _snake_case = bbox[i, j, 1]
                    _snake_case = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _snake_case = bbox[i, j, 2]
                    _snake_case = bbox[i, j, 0]
                    _snake_case = t
        _snake_case = None
        if self.use_input_mask:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _snake_case = None
        if self.use_token_type_ids:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _snake_case = None
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        _snake_case = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def A ( self : List[str] ):
        '''Build a LiltConfig from the stored hyper-parameters.'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
        '''Run the base LiltModel with/without masks and check output shapes.'''
        _snake_case = LiltModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
        '''Check the token-classification head logits shape.'''
        _snake_case = self.num_labels
        _snake_case = LiltForTokenClassification(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
        '''Check the question-answering head start/end logits shapes.'''
        _snake_case = LiltForQuestionAnswering(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def A ( self : Optional[Any] ):
        '''Repackage prepare_config_and_inputs() into the common inputs dict.'''
        _snake_case = self.prepare_config_and_inputs()
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) = config_and_inputs
        _snake_case = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
    '''Common model / pipeline test suite for the LiLT architectures.

    NOTE(review): the mixin base names (``UpperCAmelCase``) and the tester
    class ``LiltModelTester`` referenced below are mangled placeholders not
    defined in this file -- confirm against the original test module.
    '''
    # All LiLT model classes exercised by the common model tests.
    _UpperCAmelCase : List[Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # pipeline task name -> model class mapping for the pipeline tests.
    _UpperCAmelCase : List[str] = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Union[str, Any] = False
    def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
        '''Never skip any pipeline test combination for LiLT.'''
        return True
    def A ( self : Optional[Any] ):
        '''Instantiate the model tester and the shared config tester.'''
        _snake_case = LiltModelTester(self )
        _snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )
    def A ( self : Any ):
        '''Run the generic configuration sanity checks.'''
        self.config_tester.run_common_tests()
    def A ( self : Dict ):
        '''Shape-check the base model.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )
    def A ( self : List[Any] ):
        '''Shape-check the base model for every position-embedding variant.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _snake_case = type
            self.model_tester.create_and_check_model(*lowercase )
    def A ( self : Any ):
        '''Shape-check the token-classification head.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase )
    def A ( self : Any ):
        '''Shape-check the question-answering head.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase )
    @slow
    def A ( self : Union[str, Any] ):
        '''Smoke-test loading a pretrained LiLT checkpoint from the hub.'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = LiltModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Integration test: run a real pretrained LiLT checkpoint on a tiny
    input and compare the first hidden states against recorded values.

    NOTE(review): locals are mangled (everything assigned to ``_snake_case``
    while ``model``/``outputs`` are read) -- restore names before running.
    '''
    def A ( self : Tuple ):
        '''Forward a 2-token example through SCUT-DLVCLab/lilt-roberta-en-base
        and check the output slice against golden values (atol=1e-3).'''
        _snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
        _snake_case = torch.tensor([[1, 2]] , device=lowercase )
        _snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
        # forward pass
        with torch.no_grad():
            _snake_case = model(input_ids=lowercase , bbox=lowercase )
        _snake_case = torch.Size([1, 2, 768] )
        _snake_case = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
        self.assertTrue(outputs.last_hidden_state.shape , lowercase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) )
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place queens row by row, collecting complete boards.

    ``possible_board[r]`` is the column of the queen in row ``r``.  The two
    collision lists hold ``row - col`` and ``row + col`` values of placed
    queens, which identify the two diagonal directions.  Solutions are
    appended to ``boards`` as lists of printable row strings.
    """
    # The next row to fill is the number of queens placed so far.
    row = len(possible_board)
    # A queen in every row means the board is complete: render it, e.g.
    # [1, 3, 0, 2] -> ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '].
    if row == n:
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # Try each column in this row.  A column already used collides
    # vertically; matching row-col / row+col values collide diagonally
    # (45 degrees: row - col = b; 135 degrees: row + col = b).
    for col in range(n):
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Safe square: recurse with the updated board and collision sets.
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution and their count."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray over ``arr[low:high + 1]``.

    Returns ``(start, end, sum)`` of the maximum-sum contiguous subarray,
    or ``(None, None, 0)`` for an empty input.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    """Best subarray that crosses ``mid``: best left tail + best right head."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """Time max_subarray on a random array of ``input_size`` elements."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    """Benchmark a range of input sizes, print and plot the runtimes."""
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
import os
import string
import sys
# Flag OR-ed into arrow key codes so they cannot collide with plain chars.
ARROW_KEY_FLAG = 1 << 8

# Symbolic key name -> key code understood by the menu input handler.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
# Arrow key codes form a contiguous range [up .. left]; used for range checks.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Characters already read from the Windows console, pending delivery.
    WIN_CH_BUFFER = []
    # Raw Windows scan-code prefixes -> portable key codes (arrow flag stripped).
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read one raw character from the keyboard.

    On Windows, translates console scan-code prefixes via ``WIN_KEYMAP`` and
    buffers multi-byte sequences in ``WIN_CH_BUFFER``; on POSIX, switches the
    tty to raw mode for a single read and always restores the settings.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    # Deliver ESC now; the buffered chars emulate a VT sequence.
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read raised.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Return the next key press: a printable character, an arrow key code
    (with ``ARROW_KEY_FLAG`` set), or ``KEYMAP['undefined']``."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        # Possible VT escape sequence: ESC [ <code> encodes arrow keys.
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''Smoke tests for PyTorchBenchmark on tiny hub checkpoints: inference
    and training timings/memory, fp16, torchscript and CSV/log outputs.

    NOTE(review): locals are machine-mangled -- values are assigned to
    ``_snake_case`` but read back as ``MODEL_ID``/``benchmark``/``results``,
    which are never bound; restore the original names before running.
    '''
    def A ( self : List[Any] , lowercase : Dict ):
        '''Assert every (batch_size, seq_length) cell in a results dict is set.'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                _snake_case = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(lowercase )
    def A ( self : str ):
        '''Inference benchmark on a tiny GPT-2 checkpoint.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : Any ):
        '''Inference benchmark with only_pretrain_model on a tiny classifier.'''
        _snake_case = 'sgugger/tiny-distilbert-classification'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : Optional[int] ):
        '''Inference benchmark with torchscript enabled.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def A ( self : Optional[Any] ):
        '''Inference benchmark in fp16 (GPU only).'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : str ):
        '''Inference benchmark for a config whose architectures field is None.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        # set architectures equal to `None`
        _snake_case = None
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : Optional[Any] ):
        '''Training benchmark on a tiny GPT-2 checkpoint.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def A ( self : str ):
        '''Training benchmark in fp16 (GPU only).'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def A ( self : Tuple ):
        '''Inference benchmark with an explicitly supplied config.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : Union[str, Any] ):
        '''Inference benchmark for an encoder-decoder (tinier BART) config.'''
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def A ( self : Dict ):
        '''Training benchmark with an explicitly supplied config.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def A ( self : Dict ):
        '''Training benchmark for an encoder-decoder (tinier BART) config.'''
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def A ( self : Optional[Any] ):
        '''Check that all CSV result files are written to a temp directory.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )
    def A ( self : Union[str, Any] ):
        '''Check line-by-line memory tracing writes a log and full summaries.'''
        _snake_case = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(lowercase : Optional[Any] ):
            # A memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(lowercase , 'sequential' ) )
            self.assertTrue(hasattr(lowercase , 'cumulative' ) )
            self.assertTrue(hasattr(lowercase , 'current' ) )
            self.assertTrue(hasattr(lowercase , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            _snake_case = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() )
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position
    (``'a'`` -> 1, ..., ``'z'`` -> 26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of :func:`encode`: positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Prompt for a string, then print its encoded and decoded forms."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
    """A dense row x column matrix.

    Supports (row, col) tuple indexing, +, unary -, -, scalar and matrix
    multiplication, transposition, and the Sherman-Morrison rank-one
    update of an inverse.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix with every entry ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        """Render the matrix with equally-wide, right-aligned entries."""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Make string identifier: widest entry fixes the column width.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Return True iff ``loc`` is a valid (row, col) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: SCREAMING_SNAKE_CASE__):
        """Entry-wise sum; shapes must match."""
        assert isinstance(another, SCREAMING_SNAKE_CASE__)
        assert self.row == another.row and self.column == another.column
        # Add
        result = SCREAMING_SNAKE_CASE__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = SCREAMING_SNAKE_CASE__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: SCREAMING_SNAKE_CASE__):
        return self + (-another)

    def __mul__(self, another: int | float | SCREAMING_SNAKE_CASE__):
        """Scalar multiplication for numbers, matrix product for matrices."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = SCREAMING_SNAKE_CASE__(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, SCREAMING_SNAKE_CASE__):  # Matrix multiplication
            assert self.column == another.row
            result = SCREAMING_SNAKE_CASE__(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another)})'''
            raise TypeError(msg)

    def transpose(self):
        result = SCREAMING_SNAKE_CASE__(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: SCREAMING_SNAKE_CASE__, v: SCREAMING_SNAKE_CASE__):
        """Given self == A^-1, return (A + u v^T)^-1, or None if singular.

        Uses the Sherman-Morrison formula; u and v must be column vectors
        of matching dimension.
        """
        assert isinstance(u, SCREAMING_SNAKE_CASE__) and isinstance(v, SCREAMING_SNAKE_CASE__)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demo: Sherman-Morrison update starting from the 3x3 identity inverse."""
        # a^(-1)
        ainv = SCREAMING_SNAKE_CASE__(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''')
        # u, v
        u = SCREAMING_SNAKE_CASE__(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = SCREAMING_SNAKE_CASE__(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''')
        print(f'''v is {v}''')
        print(f'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
    """Row-major matrix of numbers with tuple indexing, arithmetic operators,
    transposition and a Sherman-Morrison rank-one inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        # Dimensions first, then the backing list-of-rows storage.
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        """Human-readable rendering with aligned columns."""
        s = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"""%{max_element_length}s"""

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """True when ``loc`` is a (row, col) pair inside the matrix bounds."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: A_) -> A_:
        assert isinstance(another, A_)
        assert self.row == another.row and self.column == another.column
        # Add
        result = A_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> A_:
        result = A_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: A_) -> A_:
        return self + (-another)

    def __mul__(self, another: int | float | A_) -> A_:
        """Numbers scale every entry; another matrix triggers a matrix product."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = A_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, A_):  # Matrix multiplication
            assert self.column == another.row
            result = A_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another)})"""
            raise TypeError(msg)

    def transpose(self) -> A_:
        result = A_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: A_, v: A_):
        """With self == A^-1, return (A + u v^T)^-1 or None when not invertible."""
        assert isinstance(u, A_) and isinstance(v, A_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
if __name__ == "__main__":

    def test1():
        """Demo the Sherman-Morrison update on the 3x3 identity inverse."""
        # a^(-1)
        ainv = A_(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F"""a^(-1) is {ainv}""")
        # u, v
        u = A_(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = A_(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F"""u is {u}""")
        print(F"""v is {v}""")
        print(F"""uv^T is {u * v.transpose()}""")
        # Sherman Morrison
        print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}""")

    def test2():
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
| 151 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( ChineseCLIPImageProcessor ):
    """Deprecated alias of ChineseCLIPImageProcessor.

    Kept only for backward compatibility; construction warns and otherwise
    behaves exactly like the parent image processor.
    """

    def __init__(self, *args, **kwargs):
        # FutureWarning per the transformers deprecation convention.
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path) -> None:
    """Convert a TensorFlow MobileBERT checkpoint into a PyTorch state dict.

    tf_checkpoint_path: path to the TF checkpoint.
    mobilebert_config_file: JSON config describing the architecture.
    pytorch_dump_path: where the converted state dict is saved.
    """
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--mobilebert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained MobileBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 196 |
def a_(__lowercase: str) -> int:
    """Convert a (possibly signed) hexadecimal string to binary, returned
    as an int of 0/1 digits, e.g. "AC" -> 10101100.

    Raises ValueError for empty or non-hexadecimal input.
    """
    hex_num = __lowercase.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    # Edge case: for input 0 the loop never runs; int('') would raise, so
    # return 0 explicitly.
    if not bin_str:
        return 0
    return int(('-' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
# GitHub REST API root.
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTH_URL = BASE_URL + '''/user'''
# https://github.com/settings/tokens — personal access token read from the environment.
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(_UpperCAmelCase: str) -> dict[Any, Any]:
    """Fetch the authenticated GitHub user's profile via the REST API.

    _UpperCAmelCase: a GitHub personal access token used as a bearer token.
    """
    headers = {
        "Authorization": f"""token {_UpperCAmelCase}""",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTH_URL, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
    # Print every profile field of the authenticated user, one per line.
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(F'{key}: {value}')
    else:
        raise ValueError("""\'USER_TOKEN\' field cannot be empty.""")
| 31 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

# Canonical config.json locations for the published Longformer checkpoints.
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096-finetuned-triviaqa''': (
        '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
    ),
    '''allenai/longformer-base-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
    '''allenai/longformer-large-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for Longformer models.

    Mirrors the RoBERTa configuration plus the Longformer-specific
    ``attention_window`` (per-layer or global sliding-window size) and
    ``onnx_export`` flag.
    """

    # Key used by the transformers config auto-mapping machinery.
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Store all hyper-parameters; ``pad_token_id`` is forwarded to the base."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig ( OnnxConfig ):
    """ONNX export configuration for Longformer.

    Adds the ``global_attention_mask`` input and flags the model config so
    ONNX-compatible code paths are used during export.
    """

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Make the model take the ONNX-exportable branches during tracing.
        config.onnx_export = True

    @property
    def inputs(self):
        """Dynamic-axis spec for the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def outputs(self):
        outputs = super().outputs
        if self.task == "default":
            # Default task also exposes the pooled output with a dynamic batch axis.
            outputs["pooler_output"] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self):
        # Longformer export validation needs a looser tolerance than the default.
        return 1E-4

    @property
    def default_onnx_opset(self):
        # NOTE(review): presumably >= 14 is needed for operators used by the
        # windowed-attention masking — confirm against the export code.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        """Dummy inputs plus a global_attention_mask marking every 2nd token global."""
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
    """ConfigTester variant asserting MobileNetV2-specific config attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        # MobileNetV2 configs must expose these two architecture knobs.
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds a tiny MobileNetV2 config with dummy inputs and provides the
    create_and_check_* helpers used by the model test suite."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Without finegrained output, the head width shrinks with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) for the tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model: hidden state is spatially reduced by output_stride."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        self.parent.assertEqual(
            result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Check the logits shape both with and without segmentation labels.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test-suite wiring for the MobileNetV2 models."""

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # MobileNetV2 is a pure conv net: no prunable heads, no token embeddings,
    # no attention outputs.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            # MobileNetV2 exposes 16 hidden-state stages.
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
    """Slow end-to-end checks against the published MobileNetV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits (1001 classes: ImageNet + background)
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits against a reference slice of the 21-class output
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 300 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)  # module logger, used by from_pretrained below
class SCREAMING_SNAKE_CASE__ ( ModelMixin ):
    """Runs several ControlNets on the same sample and sums their residuals.

    Mirrors diffusers' MultiControlNetModel: each net receives its own
    conditioning image and scale; down-block and mid-block residuals are
    accumulated across nets.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ):
        """Return (down_block_res_samples, mid_block_res_sample) summed over nets."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        """Save each sub-ControlNet under save_directory, save_directory_1, …"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )
            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load ControlNets from pretrained_model_path, _1, _2, … until a dir is missing."""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''
        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')
        if len(controlnets) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.''')
        return cls(controlnets)
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) projection kernels of attention block ``i``.

    params: flattened T5X parameter dict keyed by '/'-joined paths.
    prefix: 'encoder' or 'decoder'; layer_name: attention sub-module name.
    """
    k = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels of block ``i``.

    For gated-GeLU (v1.1) checkpoints, ``split_mlp_wi`` is True and ``wi``
    is the (wi_0, wi_1) pair; otherwise it is the single wi kernel.
    """
    if split_mlp_wi:
        wi_a = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_a = (wi_a, params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel'''])
        wi = wi_a
    else:
        wi = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale of sub-module ``layer_name`` in block ``i``."""
    return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Convert a T5X variable tree into an HF-T5-style parameter mapping.

    Args:
        variables: loaded T5X checkpoint tree; only the "target" subtree is used.
        num_layers: number of encoder (and decoder) blocks to convert.
        is_encoder_only: when True, the decoder stack is skipped entirely.

    Returns:
        collections.OrderedDict mapping HF parameter names to raw arrays
        (transposed where PyTorch's Linear layout requires it).

    NOTE(review): the destination key names below follow the standard
    T5X -> transformers T5 conversion layout — confirm against the target
    model's `state_dict()` keys.
    """
    old = traverse_util.flatten_dict(variables['target'] )
    old = {'/'.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_attention_layer_norm' )
        k, o, q, v = tax_attention_lookup(old , i , 'encoder' , 'attention' )
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_mlp_layer_norm' )
        wi, wo = tax_mlp_lookup(old , i , 'encoder' , split_mlp_wi )
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_self_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'self_attention' )
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_cross_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'encoder_decoder_attention' )
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_mlp_layer_norm' )
            wi, wo = tax_mlp_lookup(old , i , 'decoder' , split_mlp_wi )
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Build a torch state dict from converted numpy params.

    Missing embedding matrices (and the LM head for encoder-decoder models)
    are tied to the shared word embeddings, mirroring T5 weight tying.

    Args:
        converted_params: mapping of HF parameter name -> numpy array.
        is_encoder_only: when True, decoder/LM-head entries are not added.

    Returns:
        collections.OrderedDict of parameter name -> torch.Tensor.
    """
    # Copy each array before wrapping: torch.from_numpy shares memory otherwise.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint into `model` in place.

    Args:
        model: the instantiated HF model to populate.
        config: model config; only `num_layers` is read here.
        tax_checkpoint_path: path to the on-disk T5X checkpoint.
        is_encoder_only: forwarded to the conversion helpers.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    # strict=True: every converted key must match a model parameter exactly.
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Convert a native T5X checkpoint into a saved PyTorch checkpoint.

    Args:
        tax_checkpoint_path: path to the T5X checkpoint directory.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the PyTorch model.
        is_encoder_only: build an encoder-only model when True.
    """
    config = TaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    args = parser.parse_args()
    # BUG FIX: argparse exposes the flag as `t5x_checkpoint_path`; the original
    # accessed the non-existent attribute `tax_checkpoint_path`.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
class SCREAMING_SNAKE_CASE__:
    """Prefix-sum array supporting O(1) range-sum queries.

    >>> SCREAMING_SNAKE_CASE__([1, 2, 3]).get_sum(0, 2)
    6
    """

    def __init__(self, array: list[int]) -> None:
        """Precompute cumulative sums of ``array`` in O(n)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        # prefix_sum[i] holds array[0] + ... + array[i].
        # BUG FIX: original iterated `range(1, array)` (the list object)
        # instead of `range(1, len_array)`.
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True iff some contiguous subarray sums to ``target_sum``.

        Uses the classic set-of-prefix-sums trick: a subarray summing to
        `target_sum` exists iff prefix[j] - target_sum equals an earlier
        prefix (or 0).
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            # BUG FIX: original added the target instead of the running prefix.
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_(BaseImageProcessor):
    """Image processor: crop-pct-aware resize, center crop, rescale, normalize.

    NOTE(review): the original declared the module logger as its base class
    (a mangling artifact); `BaseImageProcessor` is the only imported base the
    `super().__init__(**kwargs)` / `BatchFeature` usage is consistent with.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image`; when `crop_pct` is given, scale the target size up by
        1/crop_pct first (the center crop later trims back to `size`)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size['shortest_edge'] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size['height'] / crop_pct)
                else:
                    scale_size = (int(size['height'] / crop_pct), int(size['width'] / crop_pct))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size['height'], size['width'])
            else:
                raise ValueError('Invalid size for resize: {}'.format(size))
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size` (requires explicit height/width keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch.

        Per-call arguments override the instance defaults set in __init__.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        # BUG FIX: the original condition parsed as
        # `(do_resize and size is None) or resample is None`, raising whenever
        # resample was None even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_pct is None:
            raise ValueError('Crop_pct must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Two parallel 2D transformers whose outputs are blended by `mix_ratio`.

    Each transformer attends to its own slice of `encoder_hidden_states`
    (split by `condition_lengths`), mirroring the dual-text-encoder setup.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on `hidden_states` and blend their residuals.

        Returns a `TransformeraDModelOutput` (or a 1-tuple when
        `return_dict` is False).
        """
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual each transformer contributes.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __snake_case(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder: token + frozen position embeddings, T5 blocks, final norm.

    NOTE(review): the three base classes were mangled to one repeated name in
    the original; (ModelMixin, ConfigMixin, ModuleUtilsMixin) matches the
    imports and the `register_to_config` / `get_extended_attention_mask` usage.
    """

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        # Learned-but-frozen absolute position embeddings.
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            self.encoders.append(TaBlock(t5config))
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        """Encode token ids; returns (hidden_states, encoder_inputs_mask)."""
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 111 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration tests for BetterTransformer conversion on a tiny T5.

    NOTE(review): both methods were originally named `A`, so the first was
    shadowed and neither was ever collected by unittest (which only discovers
    `test_*` methods); restored descriptive `test_*` names.
    """

    def test_conversion_reversal_and_reload(self):
        """Convert to BetterTransformer, reverse, save, reload, and compare outputs."""
        model_name = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_name)
        inputs = tokenizer('This is me', return_tensors='pt')
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inputs)
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inputs)
            # Reversal + round-trip through disk must not change generation.
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_save_pretrained_requires_reversal(self):
        """Saving while still converted must raise; saving after reversal must work."""
        model_name = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_name)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): the expected exception type was mangled away;
            # ValueError matches the upstream optimum/transformers behavior.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# Module-level logger for this tokenizer module.
A: Dict = logging.get_logger(__name__)
# On-disk file names the tokenizer expects for its vocab and fast-tokenizer state.
A: List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Download URLs for the pretrained vocab/tokenizer files, keyed by checkpoint name.
A: List[Any] = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}
# Maximum model input length per checkpoint.
A: Tuple = {
    '''yjernite/retribert-base-uncased''': 5_1_2,
}
# Per-checkpoint tokenizer init overrides.
A: str = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """RetriBERT fast tokenizer (BERT-style WordPiece, backed by `tokenizers`).

    NOTE(review): class attributes and method parameters were mangled into
    duplicate names (a SyntaxError / shadowing); restored the standard
    fast-tokenizer attribute names and BERT-style signatures.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the backend normalizer disagrees with the requested options,
        # rebuild it with the requested lowercase / accent / CJK settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Return `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """Return segment ids: 0s over `[CLS] A [SEP]`, 1s over `B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend model files to `save_directory`; return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 109 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCamelCase : List[Any] = HfApi()
_lowerCamelCase : Dict = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# Regression sweep: run every matching diffusers UNet checkpoint on a fixed
# noise input and compare the first 30 logits against the expected tensors above.
# NOTE(review): variable names were mangled — the assignments below bind
# `_lowerCamelCase`, yet later lines read `models`, `local_checkpoint`, `model`,
# `noise`, `time_step`, `logits` and `results`; restore the bindings before running.
_lowerCamelCase : List[str] = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Checkpoints are expected to be mirrored under this local directory.
        _lowerCamelCase : Any = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(F'Started running {mod.modelId}!!!')
        if mod.modelId.startswith('''CompVis'''):
            _lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            _lowerCamelCase : int = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the sampled noise (and thus the logits) is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        _lowerCamelCase : Union[str, Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        _lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            _lowerCamelCase : int = model(noise, time_step).sample
        # Expected-value key: model id with '/' and '-' both replaced by '_'.
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(F'{mod.modelId} has passed successfully!!!')
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    """With the mock fixture active, both 'mock' and 'bz2' are registered in fsspec."""
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    """Without the fixture, 'mock' is absent while 'bz2' stays registered."""
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    """`extract_path_from_uri` strips the s3:// scheme and leaves local paths untouched."""
    mock_bucket = "mock-s3-bucket"
    dataset_path = f'''s3://{mock_bucket}'''
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    """The mock filesystem is remote; the local 'file' filesystem is not."""
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , COMPRESSION_FILESYSTEMS )
def test_compression_filesystems(compression_fs_class, gz_file, xz_file, zstd_file, bza_file, lza_file, text_file):
    """Each compression filesystem exposes the decompressed member and its content."""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        # Optional codec not installed in this environment: skip with the
        # reason carried by the corresponding require_* marker.
        reason = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
    assert isinstance(fs , compression_fs_class )
    # The exposed member is the archive name minus its compression suffix.
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex("." )]
    assert fs.glob("*" ) == [expected_filename]
    with fs.open(expected_filename , "r" , encoding="utf-8" ) as f, open(text_file , encoding="utf-8" ) as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    """A chained ``protocol://member::archive`` URL resolves to a filesystem seeing the member file.

    Fix: duplicate ``UpperCAmelCase`` parameter names (SyntaxError) and undefined
    ``__lowercase`` references replaced with the names the body actually uses.
    """
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_api, hf_token, hf_private_dataset_repo_txt_data, text_file):
    """HfFileSystem lists, stats, and reads files from a (private) Hub dataset repo.

    Fix: duplicate ``UpperCAmelCase`` parameter names (SyntaxError) and undefined
    ``__lowercase`` references; fixture names reconstructed from the calls' keyword
    semantics — TODO confirm against conftest.
    """
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    """Re-registering an already-set protocol makes ``datasets.filesystems`` warn on reload.

    Fix: ``register_implementation``, ``pytest.warns`` and ``len`` were called with the
    undefined name ``__lowercase``; restored the concrete arguments. NOTE(review):
    ``clobber=True`` and ``UserWarning`` are inferred from the asserted warning text —
    confirm against the datasets test suite.
    """
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """ConfigTester variant that additionally checks MobileNetV2-specific config attributes.

    Fixes: the class was named ``SCREAMING_SNAKE_CASE__`` (shadowed by later classes and
    referenced in this file as ``MobileNetVaConfigTester``), its base ``UpperCAmelCase`` was
    undefined (``ConfigTester`` is imported above), the method name is the hook that
    ``ConfigTester.run_common_tests`` invokes, and ``hasattr`` was called on the undefined
    name ``lowercase`` instead of the freshly built config.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    """Builds MobileNetV2 configs/inputs and runs shape checks for the model classes under test.

    Fixes: the class was named ``SCREAMING_SNAKE_CASE__`` but is referenced in this file as
    ``MobileNetVaModelTester``; ``__init__`` and the ``create_and_check_*`` methods declared
    every parameter as ``lowercase`` (duplicate argument names are a SyntaxError); and all
    methods were named ``A`` while the test class calls them by their canonical names.
    Parameter order/defaults follow the upstream transformers tester — TODO confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # With finegrained output the final stage keeps its full width; otherwise it scales
        # with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileNetV2 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model: check last_hidden_state and pooler_output shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head: check logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Segmentation head: check logits shape with and without labels."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common-model tests for MobileNetV2, which does not use input_ids/inputs_embeds, does not
    output attentions, and has no token embeddings to resize or prune.

    Fixes: the bases were the undefined (and duplicated) name ``UpperCAmelCase`` (the mixins
    are imported above); every class attribute was bound to ``_UpperCAmelCase`` so only the
    last assignment survived — restored to the attribute names the mixins read (flag names
    inferred from upstream — TODO confirm); methods were all named ``A`` (shadowing each
    other, and unittest only runs ``test_*`` methods); and several calls used the undefined
    name ``lowercase``.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16  # number of feature maps MobileNetV2 reports
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests below.

    Fix: this helper was defined as ``a_`` but is called in this file as ``prepare_img()``,
    which raised NameError.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests running the pretrained checkpoints on a real image.

    Fixes: all methods were named ``A`` (shadowing each other; ``self.default_image_processor``
    was therefore undefined, and unittest only runs ``test_*`` methods), and several calls used
    the undefined name ``lowercase`` where ``torch_device`` / the expected tensors belong.
    """

    @cached_property
    def default_image_processor(self):
        # None when vision dependencies are missing (the tests are gated by @require_vision).
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Fix: all four module-level values were bound to the single name `__a`, so each assignment
# shadowed the previous one, while the tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
# `__a` itself was never referenced, so no alias is kept.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}

# model-name -> maximum sequence length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
class A__ ( UpperCamelCase ):
    """
    Fast Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the
    byte-level BPE tokenizer and therefore sensitive to leading spaces.

    Fixes: the ``__init__`` signature declared every parameter as ``lowerCAmelCase__``
    (duplicate argument names are a SyntaxError); all class attributes were bound to
    ``UpperCamelCase_`` and all methods to ``_lowerCAmelCase``, so later definitions shadowed
    earlier ones and the ``PreTrainedTokenizerFast`` machinery could not find them. The
    canonical attribute/method names are restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's `add_prefix_space` with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # Sync the backend post-processor's `add_prefix_space`/`trim_offsets` as well.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        """Mask token; logs an error and returns None when it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # A mask token given as a plain string behaves like an intact word
        # (lstrip so that `<mask>` absorbs the preceding space).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Blenderbot does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """A Blenderbot sequence has the format ``X </s>``; a second sequence is ignored."""
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into input ids, keeping only the last `model_max_length` tokens."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids


# NOTE(review): presumed public export name for this class — confirm against the package __init__.
BlenderbotTokenizerFast = A__
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optional `bias`) into `torch_layer`, asserting shapes match.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError), while
    every call site in this file uses ``set_param(layer, weight[, bias])``.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (query_key, value, output dense) into `torch_layer`.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError); this file
    calls it as ``set_layer_weights_in_torch_lsh(weights, attention, hidden_size)``.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (query, key, value, output dense) into `torch_layer`.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError); this file
    calls it as ``set_layer_weights_in_torch_local(weights, attention, hidden_size)``.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax reformer block (attention layernorm, attention, chunked feed-forward) into `torch_block`.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError); this file
    calls it as ``set_block_weights_in_torch(block_weights, layer, hidden_size)``.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        # 3 tensors => shared query/key (LSH attention); 4 tensors => local attention
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load all trax reformer weights (embeddings, encoder blocks, final layernorm, LM head) into `torch_model`.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError); this file
    calls it as ``set_model_weights_in_torch(model_weights, model, config.hidden_size)``.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # NOTE(review): a tuple at weights[3] is taken to mean axial position embeddings — confirm.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        # each HF block consumes 4 consecutive trax weight groups
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load the pickled trax weights, and
    save the resulting PyTorch state dict to `pytorch_dump_path`.

    Fix: defined as ``a_`` with duplicate ``__lowercase`` parameters (SyntaxError), while the
    ``__main__`` section calls it as ``convert_trax_checkpoint_to_pytorch``.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, 'rb') as f:
        # SECURITY: pickle.load executes arbitrary code — only run on trusted checkpoints.
        model_weights = pickle.load(f)['weights']

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Fix: the parser/args were bound to `_lowerCamelCase` but subsequently referenced as
    # the undefined names `parser` and `args`, so the script crashed with NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained Reformer model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import re
import string
import numpy as np
import datasets
# Fix: all three docstring constants were bound to the single name `_A` (each assignment
# shadowing the previous one), while the metric class below reads `_DESCRIPTION`,
# `_KWARGS_DESCRIPTION` and `_CITATION`. String contents are unchanged.
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
'''

_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions that equal their reference string.

    Fixes: both methods were named ``__magic_name__`` (the second definition shadowed the
    first, and ``datasets.Metric`` dispatches to ``_info``/``_compute``), and ``_compute``
    declared every parameter as ``_a`` (duplicate argument names are a SyntaxError).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before any other normalization.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 202 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
    """Materialize the pruning masks of a fine-pruned model into its weights
    ("bertarize") and save a standalone checkpoint.

    :param __lowercase: parsed CLI namespace (mangled name kept for caller
        compatibility) with ``pruning_method``, ``threshold``,
        ``model_name_or_path`` and ``target_model_path`` attributes.
    """
    args = __lowercase
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/' )
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}" )
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin' ) )
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings / norms / pooler are never pruned: copy verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # sigmoid(scores) > threshold decides which weights survive
                mask = ThresholdBinarizer.apply(scores, threshold, True )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                left, right = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (right - left) + left
                mask = s_bar.clamp(min=0.0, max=1.0 )
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}" )
            else:
                raise ValueError('Unknown pruning method' )

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ), f"bertarized_{os.path.basename(model_name_or_path )}" )

    # Copy the source folder (config, tokenizer, ...) once, then overwrite the
    # weights file with the pruned state dict. NOTE(review): the mangled source
    # nested this copy under the `target_model_path is None` branch; un-nested to
    # match the upstream script so an explicitly given target is also populated
    # — confirm intended behavior.
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path, target_model_path )
        print(f"\nCreated folder {target_model_path}" )

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin' ) )
    print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
    # NOTE(review): the mangled source bound the parser to `_lowerCamelCase` but
    # then used `parser`, and called a nonexistent `main`; bindings restored and
    # the entry point `a_` is called instead.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    a_(args)
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Optional apex support: `amp` is only importable when apex is installed.
if is_apex_available():
    from apex import amp
# Native AMP (torch.cuda.amp) exists from PyTorch 1.6 onwards.
# NOTE(review): the availability flag and the logger below were both collapsed
# onto the same mangled name `lowercase__`, so the flag (presumably
# `_is_native_amp_available`) is clobbered by the logger assignment — confirm
# against the unmangled script.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    lowercase__ = True
    from torch.cuda.amp import autocast
lowercase__ = logging.getLogger(__name__)
def UpperCamelCase( default=None , metadata=None ):
    """Dataclass helper: declare a list-valued field with a fresh default per instance.

    The mangled signature declared the same name for both parameters (a
    SyntaxError); restored to the keyword names the dataclasses below pass.
    """
    return field(default_factory=lambda: default , metadata=metadata )


# The dataclasses below refer to this helper by its original, unmangled name.
list_field = UpperCamelCase
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune from.

    NOTE(review): every field had been collapsed onto one mangled name (so only
    the last survived); names restored to the attributes ``main()`` reads
    (``model_args.model_name_or_path`` etc.).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        # NOTE(review): mangled source had `default=_snake_case` (an undefined
        # name); True matches the upstream fine-tuning script — confirm.
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
    mask_time_prob: Optional[float] = field(
        default=0.0_5 , metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): field names restored from a single mangled name to the
    attributes ``main()`` reads (``data_args.train_split_name`` etc.); the
    broken ``list_field`` reference is inlined as an equivalent
    ``field(default_factory=...)``.
    """

    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        } , )
    chars_to_ignore: List[str] = field(
        # fresh list per instance (mutable default)
        default_factory=lambda: [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"] ,
        metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pad audio inputs and CTC labels for a batch.

    Inputs and labels have different lengths, so they are padded separately;
    padded label positions are replaced with -100 so the CTC loss ignores them.
    Class name restored to the one ``main()`` instantiates.
    """

    # String annotation: documentation only, keeps the dataclass definable even
    # before the processor class is imported.
    processor: "WavaVecaProcessor"
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], "torch.Tensor"]]]) -> Dict[str, "torch.Tensor"]:
        # Split inputs and labels since they need different padding strategies.
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # Replace padding with -100 to ignore loss correctly.
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    """Trainer whose training_step scales the CTC loss correctly under
    multi-GPU, gradient accumulation and the available AMP backends.

    Class and method names restored (the mangled override was never dispatched
    to by ``Trainer`` because it was not called ``training_step``).
    """

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform one training step on a batch and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            # DataParallel returns one loss per device; reduce per model config.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def UpperCamelCase( ):
    """Fine-tune Wav2Vec2 for CTC on Common Voice (script entry point).

    NOTE(review): this definition shadows the earlier `UpperCamelCase`
    list-field helper (both names were mangled identically); the helper is only
    used at class-definition time above, so the shadowing is harmless. The
    annotated tuple assignments of the mangled source were SyntaxErrors; all
    local bindings have been restored.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )

    # Log on each process the small summary:
    # (`fp16` restored from the mangled attribute name `fpaa`.)
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )

    # Create and save tokenizer
    chars_to_ignore_regex = f"""[{"".join(data_args.chars_to_ignore )}]"""

    def remove_special_characters(batch ):
        batch['text'] = re.sub(chars_to_ignore_regex , '' , batch['sentence'] ).lower() + ' '
        return batch

    train_dataset = train_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=['sentence'] )

    def extract_all_chars(batch ):
        all_text = ' '.join(batch['text'] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    # NOTE(review): the mangled source mapped over train_dataset here too;
    # restored to eval_dataset, matching the removed eval columns.
    vocab_test = eval_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )

    vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    # Use "|" as the word delimiter in place of the space character.
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict )
    vocab_dict['[PAD]'] = len(vocab_dict )

    with open('vocab.json' , 'w' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )

    resampler = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array, sampling_rate = torchaudio.load(batch['path'] )
        batch['speech'] = resampler(speech_array ).squeeze().numpy()
        batch['sampling_rate'] = 1_60_00
        batch['target_text'] = batch['text']
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )

    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed_batch )
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )

    # Metric
    wer_metric = datasets.load_metric('wer' )

    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        # replace -100 (ignored positions) with the pad token before decoding
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )

        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_val_samples , len(eval_dataset ) )

        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )

    return results
if __name__ == "__main__":
    # The entry point was mangled to `UpperCamelCase`; `main` does not exist.
    UpperCamelCase()
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
    """Mixin for UNet block tests; subclasses set ``block_class`` and
    ``block_type`` ('down', 'mid' or 'up').

    NOTE(review): every method was mangled to the same name ``A`` (only the
    last surviving); names restored to the ones the class itself references
    (``self.get_dummy_input()``, ``self.dummy_input`` etc.).
    """

    @property
    def dummy_input(self):
        # Default dummy input; flags of get_dummy_input can be overridden.
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )

        if include_res_hidden_states_tuple:
            # separate generator so the residual states differ from hidden_states
            generator_res = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_res , device=device ),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            # NOTE(review): mangled target restored per the upstream tester mixin.
            init_dict['prev_output_channel'] = 32
        if self.block_type == "mid":
            init_dict.pop('out_channels' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )

    @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __a ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for XLMProphetNet.

    NOTE(review): the mixin base, the class attributes and every method name
    had been mangled (all methods collapsed onto one name); restored so
    unittest discovery and the mixin contract work again. The module-level
    fixture-path constant has a dunder name that Python would mangle inside
    this class body, so the path is resolved via get_tests_dir() directly.
    """

    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        sample_vocab = get_tests_dir('fixtures/test_sentencepiece.model')
        tokenizer = XLMProphetNetTokenizer(sample_vocab , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id(self):
        token = '[PAD]'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '[PAD]' )
        self.assertEqual(vocab_keys[1] , '[CLS]' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 1_012 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(
            get_tests_dir('fixtures/test_sentencepiece.model') , keep_accents=True )

        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '[UNK]',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '[UNK]',
                '.',
            ] , )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [35_389, 6_672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
# ---- (dataset-concatenation artifact removed) ----
# Doomsday anchor day-of-month (mod 7) for each month in leap years...
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# ...and in common years; only January and February differ.
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
# The mangled source bound all three tables to the same name; keep that name
# pointing at its final value for backward compatibility.
_lowerCamelCase = WEEK_DAY_NAMES


def a_ ( year: int , month: int , day: int ) -> str:
    """Return the week-day name of a Gregorian date using the Doomsday algorithm.

    The mangled signature declared the same name for all three parameters
    (a SyntaxError); restored to year/month/day.
    """
    # Minimal input validation (kept as asserts to preserve the original
    # AssertionError contract).
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # Week-day on which this year's doomsday dates fall.
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # Common year: not divisible by 4, or a century year not divisible by
        # 400. (The mangled source compared `% 400 == 0`, inverting the
        # century leap-year rule and misclassifying e.g. 2000.)
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # (trailing concatenation artifact removed from the original last line)
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
# Coulomb's constant k_e; units = N * m^2 * C^-2
__SCREAMING_SNAKE_CASE : Tuple = 8.9_88E9
# Alias under the name the function body actually uses (kept alongside the
# mangled name for backward compatibility).
COULOMBS_CONSTANT = __SCREAMING_SNAKE_CASE


def UpperCamelCase_ ( force: float , charge1: float , charge2: float , distance: float ) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is 0.

    Exactly one argument must be 0; it is computed from the other three and
    returned in a single-entry dict keyed by its name. Raises ValueError for a
    negative distance or when not exactly one argument is 0. (The mangled
    signature declared one name for all four parameters — a SyntaxError.)
    """
    charge_product = abs(charge1 * charge2 )
    if (force, charge1, charge2, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if distance < 0:
        raise ValueError("Distance cannot be negative" )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force ) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force )) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    # (stray concatenation-artifact line after this block removed)
    import doctest

    doctest.testmod()
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Checks that one DDPM and one DDIM training step produce identical noisy
    inputs and predictions when seeded identically.

    NOTE(review): both methods had been mangled to the same name ``A`` and all
    locals collapsed, so the final ``allclose`` compared a variable with
    itself; names restored (``get_dummy_input``-style helper is referenced by
    the test body itself as ``self.get_model_optimizer``).
    """

    def get_model_optimizer(self, resolution=32):
        """Build a freshly seeded UNet and an SGD optimizer for it."""
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0 )
        clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
        noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
        timesteps = [torch.randint(0 , 1_000 , (4,) ).long().to(device ) for _ in range(4 )]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32 )
        model.train().to(device )
        for i in range(4 ):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
        self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __snake_case(checkpoint, config):
    """Convert an LDM/CompVis VAE state dict into the diffusers AutoencoderKL layout.

    Args:
        checkpoint: the original VAE state dict.
        config: the diffusers VAE config (as produced by ``create_vae_diffusers_config``).

    Returns:
        dict: the converted state dict.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid-block resnets
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid-block attention
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # The decoder's up blocks are stored in reverse order in the LDM checkpoint.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid-block resnets
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid-block attention
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


# Restore the descriptive name the driver function below calls; the obfuscated
# second `__snake_case` definition would otherwise shadow this converter.
custom_convert_ldm_vae_checkpoint = __snake_case
def __snake_case(checkpoint_path, output_path):
    """Load a VAE checkpoint (.pt or .safetensors), convert it to the diffusers
    AutoencoderKL format and save it to ``output_path``.

    Only Stable Diffusion V1 checkpoints are supported; the reference V1
    inference config is fetched from the CompVis repository.
    """
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    # NOTE(review): this must resolve to the state-dict converter defined above
    # in this file (its obfuscated name no longer matches this identifier).
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: convert a VAE .pt/.safetensors checkpoint to diffusers format.
    parser = argparse.ArgumentParser()
    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    args = parser.parse_args()
    # NOTE(review): the obfuscated source called an undefined `vae_pt_to_vae_diffuser`;
    # the conversion driver defined directly above is currently named `__snake_case`.
    __snake_case(args.vae_pt_path, args.dump_path)
| 300 |
import numpy as np
def a_(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of ``vector``."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCAmelCase__(unittest.TestCase):
    """Integration checks for BetterTransformer conversion and its reversal.

    NOTE(review): the obfuscated source named both methods ``A_`` (the second
    shadowed the first); upstream test names are restored here.
    """

    def test_transform_and_reverse(self):
        """to_bettertransformer()/reverse_bettertransformer() must round-trip:
        generation output is preserved across save/reload of the reversed model."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained() must refuse to serialize a model still in
        BetterTransformer form; reversing it makes saving legal again."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # the library raises ValueError here (the obfuscated source lost the exception type)
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration check: Flax XLM-RoBERTa base produces the expected hidden
    states for a reference sentence."""

    @slow
    def A(self):
        """Compare output shape and a slice of the last hidden state against
        reference values."""
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated source bound every constant below to the same
# name `_lowercase`; the upstream names are restored so the class body can
# reference them.
SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 5_12,
}


class lowerCAmelCase_(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer backed by HuggingFace's *tokenizers* library.

    Mirrors the slow ``PegasusTokenizer``: reserves ``<mask_1>``/``<mask_2>``
    plus ``<unk_N>`` placeholder tokens as additional special tokens, with
    ``offset`` controlling how many ids are reserved.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask flagging special tokens in ``seq``."""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mask of special tokens for a sequence (pair) with/without added EOS."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append the EOS token; pairs are concatenated before the single EOS."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated source bound these constants to `_lowerCamelCase`;
# the upstream names are restored so the class body can reference them.
SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''google/pegasus-xsum''': 512,
}


class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer backed by HuggingFace's *tokenizers* library.

    Mirrors the slow ``PegasusTokenizer``: reserves ``<mask_1>``/``<mask_2>``
    plus ``<unk_N>`` placeholder tokens as additional special tokens, with
    ``offset`` controlling how many ids are reserved.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ), self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset )]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask flagging special tokens in ``seq``."""
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Mask of special tokens for a sequence (pair) with/without added EOS."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append the EOS token; pairs are concatenated before the single EOS."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __snake_case ( BaseOutput ):
    """Output of an inverse-DDIM scheduler step.

    Attributes:
        prev_sample: sample to feed into the next step of the diffusion loop.
        pred_original_sample: the model's current estimate of the denoised sample.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


# Restore the public name referenced by the scheduler's step() below; the second
# obfuscated `__snake_case` class definition would otherwise shadow this one.
DDIMSchedulerOutput = __snake_case
def A__(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule by discretizing an alpha-bar function.

    Args:
        num_diffusion_timesteps (int): number of betas to produce.
        max_beta (float): cap applied to every beta to avoid singularities.
        alpha_transform_type (str): ``"cosine"`` (Nichol & Dhariwal) or ``"exp"``.

    Returns:
        torch.Tensor: float32 betas of shape ``(num_diffusion_timesteps,)``.

    Raises:
        ValueError: for an unknown ``alpha_transform_type``.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Restore the descriptive name used by the scheduler's "squaredcos_cap_v2" branch below.
betas_for_alpha_bar = A__
class __snake_case ( SchedulerMixin , ConfigMixin ):
    """Inverse DDIM scheduler: deterministically runs the diffusion process
    *forward* (adds noise step by step), e.g. for image inversion in editing
    pipelines.

    NOTE(review): the obfuscated source gave every method the same name
    (shadowing all but the last) and duplicated the `__init__` parameter names;
    the upstream DDIMInverseScheduler API names are restored here.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("""set_alpha_to_one""", None) is not None:
            deprecation_message = (
                """The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
            )
            deprecate("""set_alpha_to_one""", """1.0.0""", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["""set_alpha_to_one"""]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        """No-op: DDIM does not rescale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the (increasing) integer timestep schedule used by step()."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        """Advance ``sample`` one inversion step (t -> t + step_ratio)."""
        # 1. get the "previous" (here: next) timestep of the inverted process
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                """ `v_prediction`""" )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
| 111 |
from collections.abc import Sequence
def a_(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients ``poly`` (lowest degree first)
    at ``x`` by direct summation of ``c_i * x**i``."""
    return sum(c * (x**i) for i, c in enumerate(poly))


# Restore the descriptive name used by the __main__ demo below.
evaluate_poly = a_
def a_(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients ``poly`` (lowest degree first)
    at ``x`` using Horner's method — O(n) with no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


# Restore the descriptive name used by the __main__ demo below.
horner = a_
if __name__ == "__main__":
    # Demo: evaluate the same polynomial both ways; the two printed values agree.
    # NOTE(review): `evaluate_poly` / `horner` must resolve to the two functions
    # defined above (their obfuscated names no longer match these identifiers).
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
# NOTE(review): the obfuscated source annotated these with unimported typing
# names (NameError at import) and bound all three constants to `A`, so each
# assignment overwrote the previous one; aliases restore the upstream names.
A = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_CITATION = A

A = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_DESCRIPTION = A

A = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
_KWARGS_DESCRIPTION = A
def _snake_case ( UpperCamelCase : int ):
def remove_articles(UpperCamelCase : Union[str, Any] ):
UpperCAmelCase : Any = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__lowercase , """ """ , __lowercase )
def white_space_fix(UpperCamelCase : Union[str, Any] ):
return " ".join(text.split() )
def remove_punc(UpperCamelCase : Any ):
UpperCAmelCase : Optional[Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCamelCase : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowercase ) ) ) )
def _snake_case(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0.

    NOTE(review): `normalize_answer` must resolve to the normalization helper
    defined above in this file (its obfuscated name no longer matches).
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


# Restore the name used by the exact-match aggregation below.
compute_exact = _snake_case
def _snake_case(predictions, references):
    """Percentage of predictions that exactly match (after normalization) at
    least one of their reference sentences.

    NOTE(review): `compute_exact` must resolve to the pairwise helper defined
    above in this file (its obfuscated name no longer matches).
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


# Restore the name used by downstream metric code.
compute_em = _snake_case
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute keep / deletion / addition scores for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (predicted) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references (source/candidate counts are scaled by
            this so they are comparable with the pooled reference counts).

    Returns:
        (keepscore, delscore_precision, addscore) tuple of floats.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP: n-grams retained from source in the candidate (& is multiset min).
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscorea = 0
    keeptmpscoreb = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscoreb += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscoreb += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscorea / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscoreb / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscoreb / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION: source n-grams correctly dropped by the candidate.
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscorea = 0
    deltmpscoreb = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscoreb += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscorea / len(delgramcounter_rep)

    # ADDITION: new n-grams in the candidate that also appear in a reference.
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: average the 1- to 4-gram keep/del/add scores.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate (predicted) sentence.
        rsents: list of reference sentences.

    Returns:
        SARI score in [0, 1].
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    # Average each component across the four n-gram orders, then average
    # the three components (the broken version summed one variable 4x).
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Lowercase and tokenize a sentence so it can be split on spaces.

    Normalization is required for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space
    to split the sentence. Even though Wiki-Auto and TURK datasets
    do not require normalization, we do it for consistency.
    Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            # sacrebleu >= 2.0 moved the tokenizers behind a factory function.
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100): mean of sentence SARI over normalized inputs.

    Raises:
        ValueError: if sources, predictions and references differ in length.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("""Sources length must match predictions and references lengths.""")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu.

    References are transposed from per-prediction lists to the per-position
    lists sacrebleu expects; every prediction must have the same number of
    references.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Metric combining SARI, sacreBLEU and exact-match for sentence splitting."""

    def _info(self):
        # Declares input features and provenance for the datasets.Metric API.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Sequence(datasets.Value("""string""", id="""sequence"""), id="""references"""),
                }
            ),
            codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ],
            reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return {'sari': ..., 'sacrebleu': ..., 'exact': ...} for the batch."""
        result = {}
        result.update({"""sari""": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"""exact""": compute_em(predictions=predictions, references=references)})
        return result
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds tiny LiLT configs/inputs and runs shape checks for each model head.

    NOTE(review): class/method names reconstructed — the test class below
    instantiates `LiltModelTester(self)` and calls these exact method names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Create random input ids, legal bounding boxes, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinate 1 <= coordinate 3 and 0 <= 2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the forward pass with progressively fewer optional inputs.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the tiny LiLT models."""

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): both flags were False in the obfuscated source; the
    # attribute names are reconstructed from the upstream LiLT test — confirm.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LiLT pipelines need bbox inputs the generic pipeline tests don't supply.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published LiLT checkpoint."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        # assertEqual, not assertTrue: with two args assertTrue would treat the
        # expected shape as a message and never actually compare it.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
a_ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 243 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray over arr[low:high + 1].

    Returns:
        (start index, end index, sum) of the maximum subarray, or
        (None, None, 0) for an empty array.
    """
    if not arr:
        return None, None, 0
    if low == high:
        # Single element: it is its own maximum subarray.
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses the midpoint: scan left from mid and right
    from mid + 1, keeping the best running sum on each side."""
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray call on a random array of `input_size` elements."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark max_subarray over growing input sizes and plot the timings."""
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod() | 282 | 0 |
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: gcd(x, y) == gcd(y, x mod y)."""
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y (non-negative inputs)."""
    from math import gcd  # stdlib gcd keeps this function self-contained

    return (x * y) // gcd(x, y)
def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number divisible by all of 1..n.

    Computed as a running LCM so intermediate values stay exact integers.
    """
    from math import gcd  # stdlib gcd keeps this function self-contained

    g = 1
    for i in range(1, n + 1):
        g = (g * i) // gcd(g, i)
    return g
if __name__ == "__main__":
print(f'{solution() = }') | 145 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    """Smoke tests for PyTorchBenchmark: each test benchmarks a tiny model and
    checks that every (batch size, sequence length) pair produced a result.

    NOTE(review): method names and the training/inference flags are
    reconstructed from which result dicts each body asserts on — confirm
    against the upstream transformers benchmark test.
    """

    def check_results_dict_not_empty(self, results):
        # Every configured (batch_size, sequence_length) pair must be present.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'], model_result['ss']):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == 'cpu', 'Cant do half precision')
    def test_inference_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == 'cpu', 'Can\'t do half precision')
    def test_train_no_configs_fp16(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = 'sshleifer/tinier_bart'
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, 'inf_time.csv'),
                train_memory_csv_file=os.path.join(tmp_dir, 'train_mem.csv'),
                inference_memory_csv_file=os.path.join(tmp_dir, 'inf_mem.csv'),
                train_time_csv_file=os.path.join(tmp_dir, 'train_time.csv'),
                env_info_csv_file=os.path.join(tmp_dir, 'env.csv'),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_time.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'inf_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'train_mem.csv')).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, 'env.csv')).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, 'sequential'))
            self.assertTrue(hasattr(summary, 'cumulative'))
            self.assertTrue(hasattr(summary, 'current'))
            self.assertTrue(hasattr(summary, 'total'))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, 'log.txt'),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_A : List[Any] = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_A : Any = re.compile(r"""([a-z\d])([A-Z])""")
_A : Optional[int] = re.compile(r"""(?<!_)_(?!_)""")
_A : int = re.compile(r"""(_{2,})""")
_A : List[str] = r'''^\w+(\.\w+)*$'''
_A : Union[str, Any] = r'''<>:/\|?*'''
def camelcase_to_snakecase(name):
    """Convert CamelCase to snake_case (e.g. 'HTMLParser' -> 'html_parser')."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name: str) -> str:
    """Convert a snake_case name to CamelCase, keeping multi-underscore runs.

    Fixes the obfuscated version, which split the whole name again instead of
    each part (and referenced the undefined ``__lowercase``).
    """
    # Drop single underscores; keep runs of 2+ underscores as literal tokens.
    parts = re.split(r"(?<!_)_(?!_)", name)
    parts = [re.split(r"(_{2,})", part) for part in parts]
    return "".join(token.capitalize() for token in itertools.chain.from_iterable(parts) if token != "")


# Preserve the obfuscated binding for any external callers.
__magic_name__ = snakecase_to_camelcase
def filename_prefix_for_name(name: str) -> str:
    """Return the snake_case file prefix for a dataset name.

    Raises ValueError when ``name`` looks like a path rather than a bare name.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


# Preserve the obfuscated binding for any external callers.
__magic_name__ = filename_prefix_for_name
def filename_prefix_for_split(name: str, split: str) -> str:
    """Return the file prefix "<snake_name>-<split>" for a dataset split.

    Fixes the obfuscated version, whose two parameters shared one name
    (a SyntaxError) and whose body referenced undefined variables.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        # Message (including the doubled quote) kept as in the original.
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


# Preserve the obfuscated binding for any external callers.
__magic_name__ = filename_prefix_for_split
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching all shard files of one dataset split.

    Fixes the obfuscated version: four parameters shared a single name
    (a SyntaxError) and the body referenced the undefined ``__lowercase``.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    # Trailing "*" matches the "-00000-of-00005"-style shard suffixes.
    return f"{filepath}*"


# Preserve the obfuscated binding for any external callers.
__magic_name__ = filepattern_for_dataset_split
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the list of on-disk filenames for one dataset split.

    With ``shard_lengths`` given, one name per shard is produced using the
    "-{shard:05d}-of-{num:05d}" convention; otherwise a single-file name.
    Fixes the obfuscated version's duplicate parameter names (SyntaxError)
    and lost local variables.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]


# Preserve the obfuscated binding for any external callers.
__magic_name__ = filenames_for_dataset_split
| 202 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, scalar/matrix multiplication,
    transpose, and the Sherman-Morrison update of an inverse.

    The obfuscated original named validate_indicies/transpose/sherman_morrison
    all ``A`` (each shadowing the last) even though the class and the demo
    script call them by their real names; those names are restored here.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: pad each cell to the widest element.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True when ``loc`` is an in-range (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Given self == A^(-1), return (A + u v^T)^(-1); None when singular."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Preserve the obfuscated class name for any external references.
SCREAMING_SNAKE_CASE__ = Matrix
# Testing
if __name__ == "__main__":
def a_ ( ) -> None:
# a^(-1)
_snake_case = Matrix(3 , 3 , 0 )
for i in range(3 ):
_snake_case = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 1, 2, -3
_snake_case = Matrix(3 , 1 , 0 )
_snake_case , _snake_case , _snake_case = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}''' )
def a_ ( ) -> None:
import doctest
doctest.testmod()
testa() | 282 | 0 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the fill count F(m, n)
    first exceeds one million, with minimum red-block length m.

    ``fill_count_functions[k]`` accumulates F(m, k); rows shorter than m can
    only be all black, hence the ``[1] * min_block_length`` seed.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)  # the all-black row

        for block_length in range(min_block_length, n + 1):
            # A block at offset block_start needs a separating black square,
            # leaving n - block_start - block_length - 1 cells to fill freely.
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # The block can also end flush with the row (no separator needed).
            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


# Preserve the obfuscated binding; the __main__ guard calls solution().
UpperCamelCase = solution
if __name__ == "__main__":
print(f'''{solution() = }''')
| 151 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
# Module logger (kept for parity with other transformers modules; unused here).
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ChineseCLIPImageProcessor):
    """Deprecated alias of ChineseCLIPImageProcessor.

    Base class restored to ChineseCLIPImageProcessor (imported above), which
    the deprecation message itself names; the obfuscated base was undefined.
    """

    def __init__(self, *args, **kwargs):
        # Emit a FutureWarning on every instantiation, then defer entirely
        # to the image processor implementation.
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import numpy

# List of input, output pairs.  The functions below reference these module
# globals by name; the obfuscated version bound them all to one identifier.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias; the rest are feature weights.
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Signed prediction error (hypothesis - actual) for one example.

    Fixes the obfuscated version, whose two parameters shared one name
    (a SyntaxError).
    """
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


# Preserve the obfuscated binding for any external callers.
snake_case_ = _error
def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = p0 + p1*x1 + p2*x2 + ... .

    NOTE(review): the loop bound was garbled; iterating over the weight
    parameters (len(parameter_vector) - 1) matches the upstream source.
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]  # bias term
    return hyp_val


# Preserve the obfuscated binding for any external callers.
snake_case_ = _hypothesis_value
def output(example_no, data_set):
    """Actual output value for an example; None for an unknown data_set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


# Preserve the obfuscated binding for any external callers.
snake_case_ = output
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for an example; None for an unknown data_set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


# Preserve the obfuscated binding for any external callers.
snake_case_ = calculate_hypothesis_value
def summation_of_cost_derivative(index, end=m):
    """Sum of errors over examples 0..end-1, weighted by feature ``index``.

    ``index == -1`` selects the bias term (no feature weighting).  Fixes the
    obfuscated version's duplicate parameter names and the loop variable that
    was garbled to an undefined name.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


# Preserve the obfuscated binding for any external callers.
snake_case_ = summation_of_cost_derivative
def get_cost_derivative(index):
    """Mean cost derivative w.r.t. parameter index+1 (index -1 -> bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


# Preserve the obfuscated binding for any external callers.
snake_case_ = get_cost_derivative
def run_gradient_descent():
    """Run batch gradient descent, updating the module-level parameter_vector
    in place until successive iterates are numerically close."""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))


# Preserve the obfuscated binding for any external callers.
snake_case_ = run_gradient_descent
def test_gradient_descent():
    """Print actual vs predicted outputs for every test-set example."""
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))


# Preserve the obfuscated binding for any external callers.
snake_case_ = test_gradient_descent
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 196 |
def a_(__lowercase: str) -> int:
    """Convert a hexadecimal string to its binary digits, returned as an int.

    Accepts surrounding whitespace and a leading minus sign.

    Raises:
        ValueError: when the input is empty or not valid hexadecimal.
    """
    hex_num = __lowercase.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')

    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')

    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    # Edge case the original crashed on: zero input leaves bin_str empty,
    # and int("") raises ValueError.
    if not bin_str:
        bin_str = '0'

    return int(('-' + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Distinct names restored: the obfuscated version bound both the logger and
# the archive map to the same identifier, discarding the logger.
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCamelCase_(PretrainedConfig):
    """Configuration for the Decision Transformer model.

    GPT-2-style hyper-parameters plus the RL-specific state/action dimensions.
    Base class restored to PretrainedConfig (imported above); the class-level
    attribute names (model_type, keys_to_ignore_at_inference, attribute_map)
    follow the transformers configuration convention.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """Store all hyper-parameters and forward token ids to the base class."""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 31 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# Distinct names restored: the obfuscated version bound both the logger and
# the archive map to the same identifier, discarding the logger.
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for the Longformer model.

    Fixes the obfuscated version: all __init__ parameters shared the name
    ``lowercase`` (a SyntaxError), the ``self.`` assignment targets were
    lost, the class attribute should be ``model_type``, and the base class
    is PretrainedConfig (imported above).
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window=512,
        sep_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        onnx_export=False,
        **kwargs,
    ):
        """Store all hyper-parameters; pad_token_id is forwarded to the base."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class SCREAMING_SNAKE_CASE__(OnnxConfig):
    """ONNX export configuration for Longformer.

    The obfuscated version named every property/method ``A`` (each shadowing
    the last) and lost local/attribute assignment targets; the standard
    OnnxConfig member names are restored.
    """

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Exporting requires the model to take the ONNX-friendly code path.
        config.onnx_export = True

    @property
    def inputs(self):
        # Multiple-choice adds an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ]
        )

    @property
    def outputs(self):
        outputs = super().outputs
        if self.task == "default":
            # NOTE(review): assignment target was garbled; upstream adds a
            # pooler_output entry here — confirm against transformers source.
            outputs["pooler_output"] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self):
        """Looser tolerance for the ONNX/torch output comparison."""
        return 1e-4

    @property
    def default_onnx_opset(self):
        # Longformer needs at least opset 14 (tril/triu support).
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ):
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Distinct names restored: the obfuscated version bound both the logger and
# the archive map to the same identifier, discarding the logger.
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = "distilbert"
__UpperCamelCase = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self :Union[str, Any] , snake_case :List[Any]=30_522 , snake_case :Dict=512 , snake_case :int=False , snake_case :int=6 , snake_case :Union[str, Any]=12 , snake_case :Union[str, Any]=768 , snake_case :Union[str, Any]=4 * 768 , snake_case :Dict=0.1 , snake_case :List[Any]=0.1 , snake_case :Dict="gelu" , snake_case :Tuple=0.02 , snake_case :str=0.1 , snake_case :str=0.2 , snake_case :List[str]=0 , **snake_case :Tuple , ):
'''simple docstring'''
A_ : str = vocab_size
A_ : Optional[Any] = max_position_embeddings
A_ : Any = sinusoidal_pos_embds
A_ : List[str] = n_layers
A_ : Dict = n_heads
A_ : Any = dim
A_ : Union[str, Any] = hidden_dim
A_ : Union[str, Any] = dropout
A_ : Tuple = attention_dropout
A_ : Optional[Any] = activation
A_ : List[Any] = initializer_range
A_ : List[str] = qa_dropout
A_ : Union[str, Any] = seq_classif_dropout
super().__init__(**snake_case , pad_token_id=snake_case )
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 300 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
# The class below calls logger.info(); the obfuscated version bound the
# logger to a different name, leaving `logger` undefined.
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ModelMixin):
    """Wrapper holding multiple ControlNetModel instances whose residuals are
    summed before being fed to the UNet.

    Fixes the obfuscated version: duplicate parameter names (SyntaxErrors),
    lost local/attribute assignment targets, and the forward/save_pretrained/
    from_pretrained method names all collapsed to ``A``.
    """

    def __init__(self, controlnets: "Union[List[ControlNetModel], Tuple[ControlNetModel]]"):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ):
        """Run every controlnet on its conditioning image and sum the residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process: bool = True,
        save_function=None,
        safe_serialization: bool = False,
        variant=None,
    ):
        """Save each controlnet under save_directory, save_directory_1, ... .

        Note the suffix is cumulative (dir, dir_1, dir_1_2, ...), mirroring
        the original implementation.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        """Load controlnets from pretrained_model_path, ..._1, ..._2, ... ."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 |
class SCREAMING_SNAKE_CASE__:
    """Prefix-sum array supporting O(1) inclusive range-sum queries and an
    O(n) check for the existence of a contiguous subarray with a given sum.

    The obfuscated original named both query methods ``A`` (the second
    shadowing the first); the conventional names are restored.
    """

    def __init__(self, array):
        """Build prefix sums for ``array`` (may be empty)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Sum of array[start..end], inclusive on both ends."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum_of(self, target_sum):
        """True if some contiguous subarray sums exactly to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            # prefix[j] - prefix[i] == target_sum for some earlier prefix i.
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Name restored: the loop below iterates pkgs_to_check_at_runtime, which the
# obfuscated version bound to a different identifier.
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()

if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def _snake_case(pkg, hint=None):
    """Check that the installed version of ``pkg`` matches its pinned
    requirement in ``deps``; ``hint`` augments the error message.

    Fixes the obfuscated version, whose two parameters shared one name
    (a SyntaxError) and whose body referenced the undefined ``__lowercase``.
    """
    require_version(deps[pkg], hint)
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__(nn.Module):
    """Two Transformer2D blocks whose per-condition outputs are mixed by
    ``mix_ratio`` (dual text-encoder conditioning, e.g. Versatile Diffusion).

    Fixes the obfuscated version: every parameter of __init__/forward shared
    one name (SyntaxErrors), attribute and local assignment targets were
    lost, and the forward method was named ``A`` (nn.Module requires
    ``forward``).
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels=None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim=None,
        attention_bias: bool = False,
        sample_size=None,
        num_vector_embeds=None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm=None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Store only the residual each transformer contributes.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
from collections.abc import Sequence
def A__(SCREAMING_SNAKE_CASE__=None) -> int:
    """Return the maximum sum over all (possibly non-contiguous, non-empty)
    subsequences of the input numbers.

    Raises ValueError for None or an empty sequence.  Local names restored
    (the obfuscated body referenced undefined ``ans``/``num``/``__lowercase``).
    """
    if SCREAMING_SNAKE_CASE__ is None or not SCREAMING_SNAKE_CASE__:
        raise ValueError("Input sequence should not be empty")

    nums = SCREAMING_SNAKE_CASE__
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best so far, extend it with num, or take num alone.
        ans = max(ans, ans + num, num)

    return ans


# The __main__ guard below calls the function by this name.
max_subsequence_sum = A__
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__UpperCAmelCase : List[Any] = int(input("Enter number of elements : ").strip())
__UpperCAmelCase : List[Any] = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 111 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Integration tests for BetterTransformer conversion round-trips.

    The obfuscated original named both tests ``A`` (shadowing each other, and
    invisible to unittest discovery) and lost every local binding; standard
    ``test_*`` names and locals are restored.
    """

    def test_transform_and_reverse(self):
        """Convert to BetterTransformer, reverse, save, reload; outputs must match."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving a model still in BetterTransformer form must raise.

        NOTE(review): the expected exception type was garbled; ValueError
        matches the upstream test — confirm.
        """
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Distinct names restored: the obfuscated version bound both the logger and
# the archive map to the same identifier, discarding the logger.
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__(UpperCAmelCase__):
    """Configuration class for an XLM model.

    Holds the architecture hyper-parameters; the defaults correspond to the
    canonical ``xlm-mlm-en-2048`` checkpoint. The original generated code gave
    every ``__init__`` parameter the same name (a SyntaxError) and bound both
    class attributes to one mangled name; the names below are restored from the
    attribute references in the body.
    """

    model_type = "xlm"
    # Map the library-wide attribute names onto XLM's historical field names.
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1E-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ) -> int:
        """Store the model hyper-parameters and forward token ids to the base config."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        # Special-token indices inside the vocabulary.
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        # Sequence-summary (classification head) options.
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        # QA-head beam sizes.
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            # Legacy alias for vocab_size (see attribute_map above).
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class SCREAMING_SNAKE_CASE__(UpperCAmelCase__):
    """ONNX export configuration for XLM models."""

    @property
    def SCREAMING_SNAKE_CASE(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axes mapping for the ONNX graph inputs.

        Multiple-choice tasks carry an extra "choice" axis between batch and
        sequence; all other tasks use (batch, sequence).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 109 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
# Expected logit slices, intended to be keyed by flattened model id
# (e.g. "google_ddpm_cifar10_32").
# NOTE(review): the generated code rebound every tensor below to one throwaway
# name, so `results` was left empty and the allclose check in the loop below can
# never find its entry. The tensors are preserved under distinct placeholder
# names; the original model-id -> tensor mapping must be restored from the
# upstream script. TODO confirm mapping.
results = {}
# fmt: off
expected_slice_01 = torch.tensor([
    -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
    1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
    -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
    0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
expected_slice_02 = torch.tensor([
    -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
    1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
    -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
    2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
expected_slice_03 = torch.tensor([
    -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
    -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
    -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
    0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
expected_slice_04 = torch.tensor([
    0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
    -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
    0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
    -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
expected_slice_05 = torch.tensor([
    0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
    -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
    0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
    -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
expected_slice_06 = torch.tensor([
    0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
    -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
    0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
    -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
expected_slice_07 = torch.tensor([
    0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
    -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
    0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
    -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
expected_slice_08 = torch.tensor([
    0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
    -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
    0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
    -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
expected_slice_09 = torch.tensor([
    -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
    1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
    -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
    1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
expected_slice_10 = torch.tensor([
    -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
    0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
    -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
    1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
expected_slice_11 = torch.tensor([
    -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
    0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
    -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
    1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
expected_slice_12 = torch.tensor([
    -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
    1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
    -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
    3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
expected_slice_13 = torch.tensor([
    -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
    1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
    -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
    2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
expected_slice_14 = torch.tensor([
    -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
    1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
    -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
    3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
expected_slice_15 = torch.tensor([
    -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
    1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
    -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
    1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# Run every Google / CompVis diffusers UNet locally and compare a fixed logit
# slice against the expected values stored in `results`.
# NOTE(review): `api` and `results` are expected to be bound by the constants
# section above this loop.
models = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(F'Started running {mod.modelId}!!!')
        if mod.modelId.startswith('''CompVis'''):
            # LDM checkpoints store the UNet in a subfolder.
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        # Fixed seeds so the noise (and therefore the logits) is reproducible.
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        # Key: model id with "/" and "-" flattened to "_" (e.g. google_ddpm_cifar10_32).
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
        )
        print(F'{mod.modelId} has passed successfully!!!')
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class snake_case(SCREAMING_SNAKE_CASE_):
    """Deprecated alias kept for backward compatibility.

    Behaves exactly like the image-processor base class but emits a
    deprecation warning on construction. The generated code declared
    ``*__UpperCAmelCase, **__UpperCAmelCase`` (duplicate parameter names, a
    SyntaxError); restored to the conventional ``*args, **kwargs``.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester for MobileNetV2; checks MobileNet-specific config fields.

    Renamed from the generated placeholder: the test class below instantiates
    ``MobileNetVaConfigTester``, and ``ConfigTester`` is imported but was unused.
    """

    def create_and_test_config_common_properties(self):
        """Instantiate the config and verify its MobileNet-specific attributes exist."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'tf_padding'))
        self.parent.assertTrue(hasattr(config, 'depth_multiplier'))
class MobileNetVaModelTester:
    """Builds tiny MobileNetV2 configs/inputs and runs shape checks for the tests.

    Renamed from the generated placeholder to match the name used in the test
    class's ``setUp``. The generated ``__init__`` repeated one parameter name
    (a SyntaxError); parameter names are restored from the attribute
    assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Finegrained output keeps the full feature width; otherwise it scales
        # with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random pixel values (and labels when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a tiny MobileNetV2 config from the tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, depth_divisible_by=self.depth_divisible_by, min_depth=self.min_depth, expand_ratio=self.expand_ratio, output_stride=self.output_stride, first_layer_is_expansion=self.first_layer_is_expansion, finegrained_output=self.finegrained_output, hidden_act=self.hidden_act, tf_padding=self.tf_padding, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Forward the base model and check hidden-state / pooled-output shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        self.parent.assertEqual(
            result.pooler_output.shape, (self.batch_size, self.last_hidden_size), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Forward the segmentation head with and without labels, checking shapes."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    MobileNetV2 model tests. Overrides several common tests because MobileNetV2
    does not use input_ids, inputs_embeds, attention_mask or seq_length.

    The generated code named every test method ``A`` (so only the last one
    survived) and left the mixin bases unbound; names are restored from the
    unused imports and unittest conventions.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): these four boolean switches were emitted under one mangled
    # name; the names below are the conventional ModelTesterMixin flags for a
    # vision model with no attention outputs — confirm against the upstream test.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        # has_text_modality=False: a vision config has no vocab_size to exercise.
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        """forward() must take pixel_values as its first (non-self) argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        """hidden_states must contain one entry per stage (16 for this config)."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Renamed from the generated placeholder `a_`: the integration tests below
    call ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


# Backward-compatible alias for the old generated name.
a_ = prepare_img
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that run the pretrained MobileNetV2 checkpoints.

    The generated code named every method ``A``; the property name is restored
    from the ``self.default_image_processor`` reference in the first test, the
    test names follow unittest convention.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224') if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        model = model.to(torch_device)
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A__(unittest.TestCase):
    """Slow Flax Stable Diffusion 2 integration tests.

    The generated code rebound every local to one mangled name, leaving
    references such as ``sd_pipe``/``prompt``/``images`` undefined; the local
    names below are restored from those references. ``jnp.bfloataa`` (not a
    jax attribute) is restored to ``jnp.bfloat16``.
    """

    def tearDown(self):
        # Free accelerator memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        # Replicate params / shard inputs across all available devices.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=2_5, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, )
        # NOTE(review): the generated code discarded the scheduler state; the
        # upstream test stores it under params["scheduler"] — confirm.
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=2_5, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy a weight (and optional bias) tensor into *torch_layer* in place.

    Args:
        torch_layer: module exposing ``.weight`` (and ``.bias`` when given).
        weight: tensor whose shape must match ``torch_layer.weight``.
        bias: optional tensor matching ``torch_layer.bias``.

    Raises:
        AssertionError: if a shape does not match the target layer.

    The generated code repeated one parameter name (a SyntaxError) and dropped
    the results into a throwaway local; names restored from the body's own
    ``torch_layer``/``weight``/``bias`` references, and the parameters are now
    actually stored on the layer.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH-attention weights (query_key, value, output dense) into a torch layer.

    Args:
        weights: trax weight tuple — [query_key, value, output_dense].
        torch_layer: torch attention module with ``self_attention`` / ``output``.
        hidden_size: model hidden dimension, used to flatten per-head matrices.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    # Per-head (heads, dim, hidden) matrices are transposed then flattened to 2-D.
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local-attention weights (separate query/key/value + output) into a torch layer.

    Args:
        weights: trax weight tuple — [query, key, value, output_dense].
        torch_layer: torch attention module with ``self_attention`` / ``output``.
        hidden_size: model hidden dimension, used to flatten per-head matrices.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax reformer block (attention + feed-forward) into a torch block.

    Args:
        weights: nested trax weight structure for a single block.
        torch_block: torch Reformer block with ``attention`` / ``feed_forward``.
        hidden_size: model hidden dimension.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention has 3 weight tensors, local attention has 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax reformer checkpoint into a torch ReformerModelWithLMHead.

    Args:
        weights: top-level trax weight structure (embeddings, blocks, head).
        torch_model: target torch model exposing ``.reformer`` and ``.lm_head``.
        hidden_size: model hidden dimension, forwarded to the block loader.

    Raises:
        AssertionError: on any shape or layer-count mismatch.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    # Axial position embeddings arrive as a tuple of per-axis weight arrays.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # Each torch layer corresponds to 4 consecutive trax weight entries.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    out_layer_norm_weight = np.asarray(weights[7][0])
    out_layer_norm_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(out_layer_norm_weight), torch.tensor(out_layer_norm_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint to a PyTorch state dict.

    Args:
        trax_model_pkl_path: path to the pickled trax model (``weights`` key).
        config_file: JSON config describing the Reformer architecture.
        pytorch_dump_path: where the converted ``state_dict`` is saved.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, 'rb') as f:
        # SECURITY: pickle.load executes arbitrary code — only run this on
        # checkpoints from a trusted source.
        model_weights = pickle.load(f)['weights']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    # The generated code bound the parser/args to a throwaway name while
    # referencing `parser`/`args`; the bindings are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Return (HF parameter name, original checkpoint name) rename pairs for the
    patch-embedding layer of CvT stage ``idx`` (conv projection + layer norm).
    """
    embed = []
    hf_prefix = f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings"""
    orig_prefix = f"""stage{idx}.patch_embed"""
    # Projection conv and its normalization, weight then bias — order preserved
    # because the caller builds an OrderedDict from this list.
    embed.append((f"""{hf_prefix}.projection.weight""", f"""{orig_prefix}.proj.weight"""))
    embed.append((f"""{hf_prefix}.projection.bias""", f"""{orig_prefix}.proj.bias"""))
    embed.append((f"""{hf_prefix}.normalization.weight""", f"""{orig_prefix}.norm.weight"""))
    embed.append((f"""{hf_prefix}.normalization.bias""", f"""{orig_prefix}.norm.bias"""))
    return embed
def attention(idx, cnt):
    """Return (HF parameter name, original checkpoint name) rename pairs for
    transformer block ``cnt`` of CvT stage ``idx``.

    The original source repeated one ``append`` per parameter (and had a
    duplicate parameter name, a SyntaxError); the loops below emit exactly the
    same tuples in exactly the same order.
    """
    attention_weights = []
    hf_prefix = f"""cvt.encoder.stages.{idx}.layers.{cnt}"""
    orig_prefix = f"""stage{idx}.blocks.{cnt}"""

    # Convolutional projections for query / key / value: one conv weight plus
    # the five batch-norm buffers/parameters each.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"""{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection"""
        conv_orig = f"""{orig_prefix}.attn.conv_proj_{short}"""
        attention_weights.append((f"""{conv_hf}.convolution.weight""", f"""{conv_orig}.conv.weight"""))
        for bn_param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"""{conv_hf}.normalization.{bn_param}""", f"""{conv_orig}.bn.{bn_param}"""))

    # Linear projections for query / key / value.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"""{hf_prefix}.attention.attention.projection_{proj}.{param}""",
                    f"""{orig_prefix}.attn.proj_{short}.{param}""",
                )
            )

    # Attention output projection, MLP, and the two layer norms.
    for hf_suffix, orig_suffix in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"""{hf_prefix}.{hf_suffix}.{param}""", f"""{orig_prefix}.{orig_suffix}.{param}""")
            )

    return attention_weights
def cls_token(idx):
    """Return the rename pair for the classification token of stage ``idx``.

    The checkpoint side is hard-coded to ``stage2`` because the original CvT
    checkpoints only store a cls token in the final stage.
    """
    token = []
    token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token"))
    return token
def final():
    """Return the rename pairs for the final layer norm and classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to a HF ``CvtForImageClassification``
    and save model + image processor under ``pytorch_dump_folder_path``.

    Args:
        cvt_model: model name, e.g. ``cvt-13`` / ``cvt-21`` / ``cvt-w24``;
            the depth configuration is derived from characters 4:6 of the name.
        image_size: input resolution to record on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): assumes the processor exposes a dict-style ``size`` — confirm.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        # The cls token only exists in the stages flagged by the config.
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    # Copy every original tensor into the HF-named state dict.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI entry point: convert an original CvT checkpoint to HF format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--cvt_model""",
        default="""cvt-w24""",
        type=str,
        help="""Name of the cvt model you\'d like to convert.""",
    )
    parser.add_argument(
        """--image_size""",
        default=3_84,
        type=int,
        help="""Input Image Size""",
    )
    parser.add_argument(
        """--cvt_file_name""",
        default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Input Image Size""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 202 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Binarize ("bertarize") a fine-pruned model: apply its learned masks and
    save a standard dense checkpoint next to the source model.

    Args:
        args: parsed CLI namespace with ``pruning_method``, ``threshold``,
            ``model_name_or_path`` and ``target_model_path``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings / norms / pooler are never pruned — copy verbatim.
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                # NOTE(review): third argument (sigmoid=True) restored from the
                # upstream bertarize script — confirm.
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"""{prefix_}mask_scores"""]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"""Pruned layer {name}""")
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"""bertarized_{os.path.basename(model_name_or_path)}"""
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"""\nCreated folder {target_model_path}""")

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    # CLI entry point for the binarization ("bertarize") script.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--pruning_method''',
        choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
        type=str,
        required=True,
        help=(
            '''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
            ''' sigmoied_threshold = Soft movement pruning)'''
        ),
    )
    parser.add_argument(
        '''--threshold''',
        type=float,
        required=False,
        help=(
            '''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
            '''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
            '''Not needed for `l0`'''
        ),
    )
    parser.add_argument(
        '''--model_name_or_path''',
        type=str,
        required=True,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    parser.add_argument(
        '''--target_model_path''',
        default=None,
        type=str,
        required=False,
        help='''Folder containing the model that was previously fine-pruned''',
    )
    args = parser.parse_args()
    main(args)
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a tiny LiLT configuration plus matching dummy inputs and runs
    shape assertions for the model classes under test.

    NOTE(review): restored from obfuscated source — the original ``__init__``
    reused one parameter name (a SyntaxError) and never stored attributes on
    ``self``; names were recovered from the assignment order and the call
    sites in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1, y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three calling conventions.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-test harness for the LiLT model family.

    NOTE(review): base classes and attribute names restored from obfuscated
    source (the original listed an undefined ``_snake_case`` three times and
    anonymous ``False`` flags); they follow the upstream LiLT test file —
    confirm the two boolean flag names.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # LiLT needs bbox inputs the generic pipeline tests do not provide.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the public LiLT checkpoint on a toy input."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        # Fixed: the original used assertTrue(a, b), which never compares b.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 151 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__:
    """Mixin for UNet block tests.

    Subclasses must define ``block_class`` and ``block_type`` ('down' / 'mid' /
    'up'). Method names restored from obfuscated source (every method was named
    ``A``, so later defs shadowed earlier ones and ``self.get_dummy_input`` /
    ``self.dummy_input`` were unresolvable).
    """

    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        # Expected (batch, channels, height, width) after the block runs on the
        # 4x32x32x32 dummy input.
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {'hidden_states': hidden_states}

        # NOTE(review): dict keys below restored from the upstream diffusers
        # test helper — confirm against the consuming blocks.
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32

        if self.block_type == "mid":
            init_dict.pop('out_channels')

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected, atol=5E-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazily-populated mapping: submodule name -> public names it exports.
# (Restored: the obfuscated source rebound one variable instead of updating
# this dict, so _LazyModule below received an undefined name.)
_import_structure = {
    '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vivit'''] = [
        '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VivitModel''',
        '''VivitPreTrainedModel''',
        '''VivitForVideoClassification''',
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on
    # first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# Doomsday "anchor" day-of-month per month (index 0 = January): the weekday of
# these dates always equals the year's doomsday. Restored names — the
# obfuscated source bound all three constants to one throwaway variable while
# the function below read the proper names.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: '''Sunday''',
    1: '''Monday''',
    2: '''Tuesday''',
    3: '''Wednesday''',
    4: '''Thursday''',
    5: '''Friday''',
    6: '''Saturday''',
}


def a_(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday algorithm.

    >>> a_(2000, 1, 1)
    'Saturday'
    >>> a_(2023, 1, 1)
    'Sunday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Non-leap years: not divisible by 4, OR a century year not divisible by
    # 400. (Fixed: the original compared year % 400 == 0 here, misclassifying
    # 1900 as leap and 2000 as non-leap.)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored names: the obfuscated source bound the logger and this map to the
# same variable, so the logger was immediately clobbered.
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-encoder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-scorer''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
    ),
    # NOTE(review): "aresolve" below looks like a typo for "resolve", but it is
    # preserved byte-for-byte — confirm against the upstream archive map.
    '''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
    ),
    '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
    '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase_(PretrainedConfig):
    """Configuration class for REALM models.

    Restored from obfuscated source: the base class was an undefined name, the
    ``__init__`` signature reused one parameter name (a SyntaxError), and no
    attribute was ever stored on ``self``. Parameter names were recovered from
    the assignment order and standard REALM defaults.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1E-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Checks DDPM and DDIM training losses agree for matched scheduler settings.

    NOTE(review): local names in this file are machine-mangled — every
    assignment targets ``_snake_case`` while later lines read ``model``,
    ``optimizer``, ``ddpm_scheduler``, ``lowercase`` etc., and the final
    ``allclose`` calls compare the undefined name ``lowercase`` with itself
    (the original presumably compared accumulated DDPM vs DDIM losses). As
    written the methods raise NameError; restore the upstream diffusers
    local names before running.
    """

    def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
        '''Build a deterministic (model, optimizer) pair at the given sample size.'''
        set_seed(0 )
        _snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
        _snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer

    @slow
    def A ( self : List[str] ):
        '''Train one batch sequence under DDPM then DDIM and compare losses.'''
        _snake_case = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        _snake_case = DDPMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
        _snake_case = DDIMScheduler(
            num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0 )
        _snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
        _snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
        _snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
        # train with a DDPM scheduler
        _snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
        model.train().to(lowercase )
        for i in range(4 ):
            optimizer.zero_grad()
            _snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _snake_case = model(lowercase , timesteps[i] ).sample
            _snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        _snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
        model.train().to(lowercase )
        for i in range(4 ):
            optimizer.zero_grad()
            _snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
            _snake_case = model(lowercase , timesteps[i] ).sample
            _snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
            loss.backward()
            optimizer.step()
        del model, optimizer
        # NOTE(review): both arguments below are the undefined `lowercase`;
        # the original compared the DDPM-run vs DDIM-run loss tensors here.
        self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
        self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
    """BuilderConfig for the pandas-backed packaged dataset builder.

    NOTE(review): the lone field name is machine-mangled; upstream this is
    the optional ``features`` schema — confirm against the `datasets` source.
    """

    # Optional datasets.Features describing the expected table schema.
    __UpperCamelCase = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
    """Packaged builder loading pickled pandas DataFrames into Arrow tables.

    NOTE(review): names here are machine-mangled — all four methods share the
    name ``SCREAMING_SNAKE_CASE`` (so only the last binding survives on the
    class), assignments target the throwaway ``A_`` while later lines read
    ``data_files``/``files``/``splits``, and several ``isinstance``/call sites
    pass the parameter ``snake_case`` where the original passed those locals.
    Restore from the upstream `datasets` pandas module before use.
    """

    # Config class used for this builder.
    __UpperCamelCase = PandasConfig

    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        '''Dataset metadata: only the (optional) user-supplied features.'''
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE ( self :Tuple , snake_case :Dict ):
        '''Resolve config.data_files into per-split generators.'''
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        A_ : str = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(snake_case , (str, list, tuple) ):
            A_ : Optional[Any] = data_files
            if isinstance(snake_case , snake_case ):
                A_ : Dict = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            A_ : List[Any] = [dl_manager.iter_files(snake_case ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        A_ : Any = []
        for split_name, files in data_files.items():
            if isinstance(snake_case , snake_case ):
                A_ : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            A_ : int = [dl_manager.iter_files(snake_case ) for file in files]
            splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={"files": files} ) )
        return splits

    def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :pa.Table ):
        '''Cast an Arrow table to the configured features' schema, if any.'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A_ : int = table_cast(snake_case , self.config.features.arrow_schema )
        return pa_table

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :List[str] ):
        '''Yield (key, Arrow table) pairs, one per pickled DataFrame file.'''
        for i, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
            with open(snake_case , "rb" ) as f:
                A_ : Dict = pa.Table.from_pandas(pd.read_pickle(snake_case ) )
            yield i, self._cast_table(snake_case )
| 300 |
import numpy as np
def a_ ( __lowercase : np.array ) -> np.array:
    """Apply the logistic sigmoid 1 / (1 + e^-x) element-wise.

    Fix: the previous body referenced the undefined name ``vector`` (a
    leftover of identifier mangling); it now uses the actual parameter.
    """
    return 1 / (1 + np.exp(-__lowercase))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Any
def _snake_case ( lowercase__ ):
if not input_list:
return []
_lowerCamelCase : Dict = [input_list.count(__lowercase ) for value in input_list]
_lowerCamelCase : int = max(__lowercase ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(__lowercase ) if value == y} )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration check of FlaxXLMRobertaModel against reference values.

    NOTE(review): locals are machine-mangled — every assignment targets
    ``_snake_case`` while later lines read ``tokenizer``, ``model``,
    ``output`` and the undefined ``lowercase``; as written this raises
    NameError. Restore the upstream transformers local names before running.
    """

    @slow
    def A ( self : int ):
        '''Encode a sentence and compare the last hidden-state slice to a golden value.'''
        _snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        _snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        _snake_case = 'The dog is cute and lives in the garden house'
        _snake_case = jnp.array([tokenizer.encode(lowercase )] )
        _snake_case = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        _snake_case = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        _snake_case = model(lowercase )['last_hidden_state']
        self.assertEqual(output.shape , lowercase )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) )
"""simple docstring"""
def _snake_case ( snake_case__ : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError('List is empty' )
A = sum(__lowercase ) / len(__lowercase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__lowercase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
# SentencePiece word-boundary marker used by the Pegasus vocabulary.
_lowerCamelCase : Tuple = '''▁'''
# Expected vocabulary / tokenizer file names for saved checkpoints.
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
# Hosted vocabulary files per pretrained checkpoint.
_lowerCamelCase : Any = {
    '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
    '''tokenizer_file''': {
        '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
_lowerCamelCase : Optional[int] = {
    '''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Fast (Rust-backed) Pegasus tokenizer wrapper.

    NOTE(review): identifiers are machine-mangled. The ``__init__`` below
    declares every keyword parameter with the same name ``lowercase`` —
    duplicate argument names are a SyntaxError — and assignments target the
    throwaway ``_snake_case`` while later lines read ``offset``,
    ``mask_token_sent``, ``additional_special_tokens``, ``vocab_file`` etc.
    Several class-attribute annotations (``Union``, ``Any``) also have no
    ``typing`` import. Restore the upstream transformers signature
    (vocab_file, tokenizer_file, pad/eos/unk/mask tokens, offset, ...)
    before this file can run.
    """

    _UpperCAmelCase : int = VOCAB_FILES_NAMES
    _UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Any = PegasusTokenizer
    _UpperCAmelCase : Dict = ["input_ids", "attention_mask"]

    def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
        '''Validate/extend the additional special tokens, then delegate to the fast-tokenizer base.'''
        _snake_case = offset
        if additional_special_tokens is not None:
            if not isinstance(lowercase , lowercase ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
                    f''' {type(lowercase )}''' )
            _snake_case = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
            ]
            if len(set(lowercase ) ) != len(lowercase ):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            _snake_case = additional_special_tokens_extended
        else:
            _snake_case = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
        super().__init__(
            lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
        _snake_case = vocab_file
        _snake_case = False if not self.vocab_file else True

    def A ( self : List[str] , lowercase : Optional[int] ):
        '''Return a 0/1 mask flagging which ids in a sequence are special tokens.'''
        _snake_case = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
        '''Special-token mask for a (possibly paired) sequence; trailing 1 marks EOS.'''
        if already_has_special_tokens:
            return self._special_token_mask(lowercase )
        elif token_ids_a is None:
            return self._special_token_mask(lowercase ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a ) + [1]

    def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
        '''Append EOS to build model inputs from one or two sequences.'''
        if token_ids_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a + [self.eos_token_id]

    def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
        '''Copy the slow-tokenizer SentencePiece model into *save_directory*.'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(lowercase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _snake_case = os.path.join(
            lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
            copyfile(self.vocab_file , lowercase )
        return (out_vocab_file,)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 111 |
from collections.abc import Sequence
def a_ ( poly: Sequence[float] , x: float ) -> float:
    """Evaluate the polynomial with coefficients *poly* (lowest degree first) at *x*.

    Runs in O(n) with n multiplications per term (naive power evaluation).
    Fix: the previous signature declared both parameters with the same
    mangled name ``__lowercase`` — duplicate argument names are a
    SyntaxError — so the names are restored from the body's usage.
    """
    return sum(c * (x**i) for i, c in enumerate(poly) )
def a_ ( poly: Sequence[float] , x: float ) -> float:
    """Evaluate the polynomial *poly* (lowest degree first) at *x* via Horner's rule.

    Horner's method uses exactly one multiply and one add per coefficient.
    Fix: the previous signature declared both parameters with the same
    mangled name ``__lowercase`` — duplicate argument names are a
    SyntaxError — so the names are restored from the body's usage.
    """
    result = 0.0
    for coeff in reversed(poly):
        # result = (((c_n) * x + c_{n-1}) * x + ...) * x + c_0
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # NOTE(review): `evaluate_poly`, `horner`, `poly` and `x` are not defined
    # under these names in this (mangled) module — both functions above were
    # renamed to `a_`, and the assignments below target `_lowerCamelCase` —
    # so this demo raises NameError as written. Confirm the intended names
    # against the upstream source.
    _lowerCamelCase : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)  # coefficients, lowest degree first
    _lowerCamelCase : Optional[int] = 1_0.0  # evaluation point
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
"""simple docstring"""
from __future__ import annotations
import bisect
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
if hi < 0:
UpperCAmelCase : List[Any] = len(__lowercase )
while lo < hi:
UpperCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
UpperCAmelCase : List[str] = mid + 1
else:
UpperCAmelCase : Optional[int] = mid
return lo
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
if hi < 0:
UpperCAmelCase : Optional[int] = len(__lowercase )
while lo < hi:
UpperCAmelCase : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
UpperCAmelCase : Optional[Any] = mid + 1
else:
UpperCAmelCase : Optional[Any] = mid
return lo
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
sorted_collection.insert(bisect_left(__lowercase , __lowercase , __lowercase , __lowercase ) , __lowercase )
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int = 0 , UpperCamelCase : int = -1 ):
sorted_collection.insert(bisect_right(__lowercase , __lowercase , __lowercase , __lowercase ) , __lowercase )
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int ):
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Dict = len(__lowercase ) - 1
while left <= right:
UpperCAmelCase : Tuple = left + (right - left) // 2
UpperCAmelCase : Optional[Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
UpperCAmelCase : Tuple = midpoint - 1
else:
UpperCAmelCase : str = midpoint + 1
return None
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int ):
UpperCAmelCase : int = bisect.bisect_left(__lowercase , __lowercase )
if index != len(__lowercase ) and sorted_collection[index] == item:
return index
return None
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ):
if right < left:
return None
UpperCAmelCase : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__lowercase , __lowercase , __lowercase , midpoint - 1 )
else:
return binary_search_by_recursion(__lowercase , __lowercase , midpoint + 1 , __lowercase )
if __name__ == "__main__":
    # NOTE(review): this demo references `user_input`, `collection`, `target`,
    # `result` and `binary_search`, none of which exist under these (mangled)
    # names — the assignments below all target `A`, and every search helper
    # above is named `_snake_case` — so it raises NameError as written.
    # Confirm the intended names against the upstream source.
    A: int = input("Enter numbers separated by comma:\n").strip()
    A: Dict = sorted(int(item) for item in user_input.split(","))
    A: Union[str, Any] = int(input("Enter a single number to be found in the list:\n"))
    A: Dict = binary_search(collection, target)
    if result is None:
        print(f"""{target} was not found in {collection}.""")
    else:
        print(f"""{target} was found at position {result} in {collection}.""")
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
    """Test helper building LiLT configs and synthetic inputs (LiltModelTester).

    NOTE(review): identifiers are machine-mangled — every method's keyword
    parameters share the name ``lowercase`` (duplicate argument names are a
    SyntaxError), assignments target the throwaway ``_snake_case`` while
    later lines read ``parent``/``bbox``/``model``/``result`` etc., and the
    final unpack in the last method binds all seven elements to one name.
    Restore the upstream transformers tester before running.
    """

    def __init__( self : Dict , lowercase : str , lowercase : List[str]=13 , lowercase : Any=7 , lowercase : Dict=True , lowercase : str=True , lowercase : List[Any]=True , lowercase : Any=True , lowercase : Tuple=99 , lowercase : str=24 , lowercase : str=2 , lowercase : Any=6 , lowercase : Dict=37 , lowercase : List[str]="gelu" , lowercase : Dict=0.1 , lowercase : Tuple=0.1 , lowercase : Optional[Any]=512 , lowercase : List[Any]=16 , lowercase : str=2 , lowercase : int=0.02 , lowercase : List[Any]=3 , lowercase : List[Any]=None , lowercase : int=1_000 , ):
        '''Record the hyper-parameters used to build configs and dummy inputs.'''
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = seq_length
        _snake_case = is_training
        _snake_case = use_input_mask
        _snake_case = use_token_type_ids
        _snake_case = use_labels
        _snake_case = vocab_size
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = max_position_embeddings
        _snake_case = type_vocab_size
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = num_labels
        _snake_case = scope
        _snake_case = range_bbox

    def A ( self : List[Any] ):
        '''Build config plus random input_ids / bbox / masks / labels.'''
        _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _snake_case = bbox[i, j, 3]
                    _snake_case = bbox[i, j, 1]
                    _snake_case = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _snake_case = bbox[i, j, 2]
                    _snake_case = bbox[i, j, 0]
                    _snake_case = t
        _snake_case = None
        if self.use_input_mask:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _snake_case = None
        if self.use_token_type_ids:
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _snake_case = None
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        _snake_case = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def A ( self : List[str] ):
        '''Assemble a LiltConfig from the stored hyper-parameters.'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def A ( self : str , lowercase : Tuple , lowercase : Tuple , lowercase : str , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] , lowercase : str , ):
        '''Forward a base LiltModel and check output shapes.'''
        _snake_case = LiltModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase , token_type_ids=lowercase )
        _snake_case = model(lowercase , bbox=lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def A ( self : List[Any] , lowercase : int , lowercase : int , lowercase : Any , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Optional[int] , ):
        '''Forward LiltForTokenClassification and check the logits shape.'''
        _snake_case = self.num_labels
        _snake_case = LiltForTokenClassification(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A ( self : List[str] , lowercase : Union[str, Any] , lowercase : str , lowercase : Dict , lowercase : Optional[int] , lowercase : List[str] , lowercase : int , lowercase : int , ):
        '''Forward LiltForQuestionAnswering and check start/end logit shapes.'''
        _snake_case = LiltForQuestionAnswering(config=lowercase )
        model.to(lowercase )
        model.eval()
        _snake_case = model(
            lowercase , bbox=lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A ( self : Optional[Any] ):
        '''Repackage prepare_config_and_inputs() output as (config, inputs_dict).'''
        _snake_case = self.prepare_config_and_inputs()
        # NOTE(review): every unpack target below is the same name, so only
        # the last element survives; the dict keys that follow (`input_ids`,
        # `bbox`, ...) are undefined as written.
        (
            (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) , (
                _snake_case
            ) ,
        ) = config_and_inputs
        _snake_case = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
    """Common-model-test suite for LiLT (LiltModelTest).

    NOTE(review): names are machine-mangled — all test methods share the
    name ``A`` (only the last binding survives on the class), one signature
    repeats the parameter name ``lowercase`` (a SyntaxError), and bodies mix
    ``_snake_case`` assignments with reads of the original local names.
    The mixin bases are presumably ModelTesterMixin / GenerationTesterMixin /
    PipelineTesterMixin — confirm against the upstream transformers test.
    """

    _UpperCAmelCase : List[Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase : List[str] = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Union[str, Any] = False

    def A ( self : Dict , lowercase : Dict , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Tuple ):
        '''Pipeline filter hook: run every pipeline test (never skip).'''
        return True

    def A ( self : Optional[Any] ):
        '''Set up the model tester and config tester.'''
        _snake_case = LiltModelTester(self )
        _snake_case = ConfigTester(self , config_class=lowercase , hidden_size=37 )

    def A ( self : Any ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def A ( self : Dict ):
        '''simple docstring'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    def A ( self : List[Any] ):
        '''Re-run the model check under each position-embedding type.'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _snake_case = type
            self.model_tester.create_and_check_model(*lowercase )

    def A ( self : Any ):
        '''simple docstring'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase )

    def A ( self : Any ):
        '''simple docstring'''
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase )

    @slow
    def A ( self : Union[str, Any] ):
        '''Smoke-test loading each published LiLT checkpoint.'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = LiltModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test comparing LiltModel output to golden values.

    NOTE(review): locals are machine-mangled — assignments target
    ``_snake_case`` while later lines read ``model``/``outputs``, and
    ``lowercase`` (presumably ``torch_device``) is undefined; as written
    this raises NameError. Restore upstream names before running.
    """

    def A ( self : Tuple ):
        '''Forward a tiny hand-built batch and compare a logits slice.'''
        _snake_case = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(lowercase )
        _snake_case = torch.tensor([[1, 2]] , device=lowercase )
        _snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase )
        # forward pass
        with torch.no_grad():
            _snake_case = model(input_ids=lowercase , bbox=lowercase )
        _snake_case = torch.Size([1, 2, 768] )
        _snake_case = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowercase , )
        self.assertTrue(outputs.last_hidden_state.shape , lowercase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase , atol=1E-3 ) )
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) -> bool:
    """Return True when the card number starts with an accepted issuer prefix
    (Amex 34/37, JCB 35, Visa 4, MasterCard 5, Discover 6).

    Fix: the previous body referenced the undefined name
    ``credit_card_number``; it now uses the actual parameter.
    """
    return UpperCAmelCase.startswith(("34", "35", "37", "4", "5", "6") )
def UpperCamelCase ( UpperCAmelCase ) -> bool:
    """Return True when the digit string passes the Luhn checksum.

    Fix: the previous body referenced undefined pre-mangle names
    (``credit_card_number``, ``cc_number``, ``total``, ``__lowercase``);
    the locals are restored from the algorithm's structure.
    """
    cc_number = UpperCAmelCase
    total = 0
    half_way = len(cc_number) - 2
    for i in range(half_way, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def UpperCamelCase ( UpperCAmelCase ) -> bool:
    """Validate a credit-card number, printing a diagnostic, and return the verdict.

    Checks: all digits, length 13-16, accepted issuer prefix, Luhn checksum.
    Fixes over the mangled original: the body referenced undefined names
    (``credit_card_number``, ``error_message``, ``__lowercase``) and called
    ``validate_initial_digits`` / ``luhn_validation``, which no longer exist
    under those names in this module — both checks are inlined as private
    helpers so the function is self-contained.
    """
    credit_card_number = UpperCAmelCase

    def _valid_prefix(number: str) -> bool:
        # Accepted issuer prefixes: Amex 34/37, JCB 35, Visa 4, MC 5, Discover 6.
        return number.startswith(("34", "35", "37", "4", "5", "6"))

    def _luhn(number: str) -> bool:
        # Standard Luhn checksum: double every second digit from the right,
        # folding two-digit products back to one digit (d % 10 + 1).
        total = 0
        for i, ch in enumerate(reversed(number)):
            digit = int(ch)
            if i % 2 == 1:
                digit *= 2
                if digit > 9:
                    digit = digit % 10 + 1
            total += digit
        return total % 10 == 0

    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''' )
        return False
    if not _valid_prefix(credit_card_number):
        print(F'''{error_message} of its first two digits.''' )
        return False
    if not _luhn(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''' )
        return False
    print(F'''{credit_card_number} is a valid credit card number.''' )
    return True
if __name__ == "__main__":
    # NOTE(review): `validate_credit_card_number` is not defined under that
    # name in this (mangled) module — all helpers above were renamed to
    # `UpperCamelCase` — so these demo calls raise NameError as written.
    import doctest

    doctest.testmod()
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( arr: Sequence[float] , low: int , high: int ) -> tuple[int | None, int | None, float]:
    """Return (start, end, sum) of a maximum-sum contiguous subarray of
    arr[low:high + 1], found by divide and conquer in O(n log n).

    Returns (None, None, 0) for an empty array. On sum ties, the
    entirely-left solution is preferred over right, and right over the
    straddling one (matching the original comparison order).

    Fixes over the mangled original: all three parameters shared one name
    (a SyntaxError), and the recursive / cross-sum calls referenced
    ``max_subarray`` / ``max_cross_sum``, which no longer exist under those
    names — both are now local helpers so the function is self-contained.
    """

    def _cross(lo: int, mid: int, hi: int) -> tuple[int, int, float]:
        # Best subarray forced to straddle the midpoint: best suffix ending
        # at mid plus best prefix starting at mid + 1.
        best_left_sum, best_left = float('-inf'), -1
        running = 0
        for i in range(mid, lo - 1, -1):
            running += arr[i]
            if running > best_left_sum:
                best_left_sum, best_left = running, i
        best_right_sum, best_right = float('-inf'), -1
        running = 0
        for i in range(mid + 1, hi + 1):
            running += arr[i]
            if running > best_right_sum:
                best_right_sum, best_right = running, i
        return best_left, best_right, best_left_sum + best_right_sum

    def _solve(lo: int, hi: int) -> tuple[int, int, float]:
        if lo == hi:  # single element
            return lo, hi, arr[lo]
        mid = (lo + hi) // 2
        left = _solve(lo, mid)
        right = _solve(mid + 1, hi)
        cross = _cross(lo, mid, hi)
        # max() returns the first maximum, preserving left >= right >= cross
        # tie-breaking from the original branch order.
        return max(left, right, cross, key=lambda best: best[2])

    if not arr:
        return None, None, 0
    return _solve(low, high)
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def a_ ( __lowercase : int ) -> float:
    """Time one max-subarray computation over a random list of length *__lowercase*.

    NOTE(review): the body references `max_subarray`, `input_size`, `end`
    and `start`, which do not exist under these (mangled) names — every
    assignment targets `_snake_case` — so this raises NameError as written.
    Restore the original local/function names before use.
    """
    _snake_case = [randint(1 , __lowercase ) for _ in range(__lowercase )]
    _snake_case = time.time()
    max_subarray(__lowercase , 0 , input_size - 1 )
    _snake_case = time.time()
    return end - start
def a_ ( ) -> None:
    """Benchmark max-subarray across growing input sizes and plot the timings.

    NOTE(review): the body references `time_max_subarray`, `input_sizes`,
    `runtimes` and `__lowercase`, which do not exist under these (mangled)
    names — raises NameError as written. Restore the original names
    before use.
    """
    _snake_case = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    _snake_case = [time_max_subarray(__lowercase ) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken' )
    for input_size, runtime in zip(__lowercase , __lowercase ):
        print(__lowercase , '\t\t' , __lowercase )
    plt.plot(__lowercase , __lowercase )
    plt.xlabel('Number of Inputs' )
    plt.ylabel('Time taken in seconds' )
    plt.show()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCAmelCase ( a_: Dict, a_: str=0.9_99, a_: Union[str, Any]="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_: Union[str, Any] ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_: Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_UpperCAmelCase : Any = []
for i in range(__lowercase ):
_UpperCAmelCase : Optional[int] = i / num_diffusion_timesteps
_UpperCAmelCase : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowercase ) / alpha_bar_fn(__lowercase ), __lowercase ) )
return torch.tensor(__lowercase, dtype=torch.floataa )
class A__ ( UpperCamelCase , UpperCamelCase ):
    """Heun-style (2nd-order) discrete scheduler in the shape of diffusers' HeunDiscreteScheduler.

    NOTE(review): throughout this class, results are bound to throwaway locals
    named ``_UpperCAmelCase`` while later code reads attributes such as
    ``self.betas``, ``self.alphas``, ``self.sigmas``, ``self.timesteps``,
    ``self.dt``, ``self.sample`` and ``self._index_counter`` that are never
    assigned here — the ``self.`` assignment targets appear to have been
    mangled.  Method names were likewise mangled to ``_lowerCAmelCase`` while
    call sites use ``index_for_timestep`` / ``set_timesteps`` / ``_convert_to_karras``
    / ``_sigma_to_t`` / ``state_in_first_order``.  Confirm against the upstream
    implementation before relying on this code.
    """

    # NOTE(review): both class attributes share one name, so the second binding
    # (the solver order, 2) overwrites the compatible-scheduler name list.
    UpperCamelCase_ : Dict = [e.name for e in KarrasDiffusionSchedulers]
    UpperCamelCase_ : List[str] = 2

    @register_to_config
    def __init__( self : Tuple , lowerCAmelCase__ : int = 1_0_0_0 , lowerCAmelCase__ : float = 0.0_0085 , lowerCAmelCase__ : float = 0.012 , lowerCAmelCase__ : str = "linear" , lowerCAmelCase__ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ : str = "epsilon" , lowerCAmelCase__ : Optional[bool] = False , lowerCAmelCase__ : Optional[bool] = False , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : str = "linspace" , lowerCAmelCase__ : int = 0 , ) -> List[str]:
        """Build the beta/alpha schedule and initialise the timestep tables.

        NOTE(review): every parameter shares the name ``lowerCAmelCase__``
        (a SyntaxError), and the body reads ``trained_betas`` / ``beta_schedule``
        / ``beta_start`` / ``beta_end`` / ``use_karras_sigmas`` which are not in
        scope — the parameter names appear mangled.
        """
        if trained_betas is not None:
            _UpperCAmelCase : int = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            _UpperCAmelCase : Optional[int] = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            _UpperCAmelCase : Dict = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            _UpperCAmelCase : int = betas_for_alpha_bar(lowerCAmelCase__ , alpha_transform_type="cosine" )
        elif beta_schedule == "exp":
            _UpperCAmelCase : Tuple = betas_for_alpha_bar(lowerCAmelCase__ , alpha_transform_type="exp" )
        else:
            raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
        _UpperCAmelCase : Optional[int] = 1.0 - self.betas
        _UpperCAmelCase : int = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCAmelCase : Tuple = use_karras_sigmas

    def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any=None ) -> Tuple:
        """Map a timestep value to its index position in the timestep schedule."""
        if schedule_timesteps is None:
            _UpperCAmelCase : List[Any] = self.timesteps
        _UpperCAmelCase : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            _UpperCAmelCase : List[Any] = 1 if len(lowerCAmelCase__ ) > 1 else 0
        else:
            _UpperCAmelCase : List[str] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep
            _UpperCAmelCase : Tuple = self._index_counter[timestep_int]
        return indices[pos].item()

    @property
    def _lowerCAmelCase ( self : Any ) -> Any:
        """Initial noise scale (sigma) used to scale the very first latent sample."""
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : Union[float, torch.FloatTensor] , ) -> Optional[Any]:
        """Scale the model input by 1 / sqrt(sigma**2 + 1) for the current step."""
        _UpperCAmelCase : Tuple = self.index_for_timestep(lowerCAmelCase__ )
        _UpperCAmelCase : Optional[Any] = self.sigmas[step_index]
        _UpperCAmelCase : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Union[str, torch.device] = None , lowerCAmelCase__ : Optional[int] = None , ) -> Optional[Any]:
        """Precompute sigmas and (Heun-interleaved) timesteps for inference."""
        _UpperCAmelCase : int = num_inference_steps
        _UpperCAmelCase : List[Any] = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            _UpperCAmelCase : Any = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase__ , dtype=lowerCAmelCase__ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            _UpperCAmelCase : List[str] = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            _UpperCAmelCase : Tuple = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase__ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            _UpperCAmelCase : Tuple = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            _UpperCAmelCase : str = (np.arange(lowerCAmelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase__ )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.""" )
        # sigma_i = sqrt((1 - alpha_bar_i) / alpha_bar_i), interpolated onto the
        # chosen inference timesteps in log space.
        _UpperCAmelCase : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        _UpperCAmelCase : Optional[int] = np.log(lowerCAmelCase__ )
        _UpperCAmelCase : Optional[int] = np.interp(lowerCAmelCase__ , np.arange(0 , len(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
        if self.config.use_karras_sigmas:
            _UpperCAmelCase : int = self._convert_to_karras(in_sigmas=lowerCAmelCase__ , num_inference_steps=self.num_inference_steps )
            _UpperCAmelCase : Any = np.array([self._sigma_to_t(lowerCAmelCase__ , lowerCAmelCase__ ) for sigma in sigmas] )
        _UpperCAmelCase : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        _UpperCAmelCase : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ )
        # Heun repeats every interior sigma/timestep twice: one full step plus
        # one 2nd-order correction step.
        _UpperCAmelCase : str = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        _UpperCAmelCase : Optional[int] = torch.from_numpy(lowerCAmelCase__ )
        _UpperCAmelCase : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(lowerCAmelCase__ ).startswith("mps" ):
            # mps does not support float64
            _UpperCAmelCase : str = timesteps.to(lowerCAmelCase__ , dtype=torch.floataa )
        else:
            _UpperCAmelCase : Optional[int] = timesteps.to(device=lowerCAmelCase__ )
        # empty dt and derivative
        _UpperCAmelCase : List[Any] = None
        _UpperCAmelCase : Tuple = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        _UpperCAmelCase : int = defaultdict(lowerCAmelCase__ )

    def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ) -> Dict:
        """Interpolate a continuous timestep t for ``sigma`` from the log-sigma table."""
        _UpperCAmelCase : Optional[Any] = np.log(lowerCAmelCase__ )
        # get distribution
        _UpperCAmelCase : str = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        _UpperCAmelCase : List[str] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        _UpperCAmelCase : Optional[int] = low_idx + 1
        _UpperCAmelCase : Optional[Any] = log_sigmas[low_idx]
        _UpperCAmelCase : Union[str, Any] = log_sigmas[high_idx]
        # interpolate sigmas
        _UpperCAmelCase : int = (low - log_sigma) / (low - high)
        _UpperCAmelCase : Any = np.clip(lowerCAmelCase__ , 0 , 1 )
        # transform interpolation to time range
        _UpperCAmelCase : Tuple = (1 - w) * low_idx + w * high_idx
        _UpperCAmelCase : List[str] = t.reshape(sigma.shape )
        return t

    def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : str ) -> str:
        """Construct the Karras et al. (2022) noise schedule over the input sigma range."""
        _UpperCAmelCase : Dict = in_sigmas[-1].item()
        _UpperCAmelCase : List[Any] = in_sigmas[0].item()
        _UpperCAmelCase : int = 7.0  # 7.0 is the value used in the paper
        _UpperCAmelCase : List[str] = np.linspace(0 , 1 , lowerCAmelCase__ )
        _UpperCAmelCase : List[Any] = sigma_min ** (1 / rho)
        _UpperCAmelCase : str = sigma_max ** (1 / rho)
        _UpperCAmelCase : Optional[int] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """True while no 2nd-order correction is pending (``dt`` not stored yet)."""
        return self.dt is None

    def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase__ : Union[float, torch.FloatTensor] , lowerCAmelCase__ : Union[torch.FloatTensor, np.ndarray] , lowerCAmelCase__ : bool = True , ) -> Tuple:
        """One Heun update: a 1st-order Euler step, then a 2nd-order correction step."""
        _UpperCAmelCase : Dict = self.index_for_timestep(lowerCAmelCase__ )
        # advance index counter by 1
        _UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            _UpperCAmelCase : Tuple = self.sigmas[step_index]
            _UpperCAmelCase : Any = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            _UpperCAmelCase : int = self.sigmas[step_index - 1]
            _UpperCAmelCase : Optional[Any] = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        _UpperCAmelCase : Union[str, Any] = 0
        _UpperCAmelCase : int = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            _UpperCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next
            _UpperCAmelCase : Tuple = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            _UpperCAmelCase : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next
            _UpperCAmelCase : List[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            _UpperCAmelCase : Dict = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.config.clip_sample:
            _UpperCAmelCase : int = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            _UpperCAmelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            _UpperCAmelCase : List[str] = sigma_next - sigma_hat
            # store for 2nd order step
            _UpperCAmelCase : int = derivative
            _UpperCAmelCase : List[Any] = dt
            _UpperCAmelCase : Optional[Any] = sample
        else:
            # 2. 2nd order / Heun's method
            _UpperCAmelCase : str = (sample - pred_original_sample) / sigma_next
            _UpperCAmelCase : Tuple = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            _UpperCAmelCase : Optional[Any] = self.dt
            _UpperCAmelCase : Any = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            _UpperCAmelCase : Any = None
            _UpperCAmelCase : Union[str, Any] = None
            _UpperCAmelCase : List[Any] = None
        _UpperCAmelCase : Optional[int] = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase__ )

    def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor , ) -> Dict:
        """Diffuse ``original_samples`` to the noise level of the given timesteps."""
        _UpperCAmelCase : Any = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase__ ):
            # mps does not support float64
            _UpperCAmelCase : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            _UpperCAmelCase : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            _UpperCAmelCase : Dict = self.timesteps.to(original_samples.device )
            _UpperCAmelCase : List[Any] = timesteps.to(original_samples.device )
        _UpperCAmelCase : Union[str, Any] = [self.index_for_timestep(lowerCAmelCase__ , lowerCAmelCase__ ) for t in timesteps]
        _UpperCAmelCase : Union[str, Any] = sigmas[step_indices].flatten()
        # Broadcast sigma up to the sample's rank before mixing in the noise.
        while len(sigma.shape ) < len(original_samples.shape ):
            _UpperCAmelCase : Any = sigma.unsqueeze(-1 )
        _UpperCAmelCase : Tuple = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : Dict ) -> Tuple:
        """Length of the training-timestep range."""
        return self.config.num_train_timesteps
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Smoke tests for ``PyTorchBenchmark`` on tiny checkpoints.

    NOTE(review): every test method is named ``A``, so only the last definition
    survives on the class, and results are bound to throwaway ``_snake_case``
    locals while the following lines read names (``MODEL_ID``, ``lowercase``,
    ``benchmark``, ``results``, ``config``, ``tmp_dir``, ``result``) that are
    never assigned — the local names appear to have been mangled.  Confirm
    against the upstream benchmark test module.
    """

    def A ( self : List[Any] , lowercase : Dict ):
        """Assert every (batch_size, sequence_length) result cell is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                _snake_case = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(lowercase )

    def A ( self : str ):
        """Inference benchmark on a tiny GPT-2 checkpoint."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Any ):
        """Inference benchmark with only_pretrain_model enabled."""
        _snake_case = 'sgugger/tiny-distilbert-classification'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , only_pretrain_model=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Optional[int] ):
        """Inference benchmark with torchscript enabled."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , torchscript=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def A ( self : Optional[Any] ):
        """Inference benchmark in half precision (GPU only)."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , fpaa=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : str ):
        """Inference benchmark for a config whose architectures field is None."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        # set architectures equal to `None`
        _snake_case = None
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Optional[Any] ):
        """Training benchmark on a tiny GPT-2 checkpoint."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def A ( self : str ):
        """Training benchmark in half precision (GPU only)."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Tuple ):
        """Inference benchmark with an explicitly provided config."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Union[str, Any] ):
        """Inference benchmark for an encoder-decoder model (tiny BART)."""
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def A ( self : Dict ):
        """Training benchmark with an explicitly provided config."""
        _snake_case = 'sshleifer/tiny-gpt2'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Dict ):
        """Training benchmark for an encoder-decoder model (tiny BART)."""
        _snake_case = 'sshleifer/tinier_bart'
        _snake_case = AutoConfig.from_pretrained(lowercase )
        _snake_case = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase , )
        _snake_case = PyTorchBenchmark(lowercase , configs=[config] )
        _snake_case = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def A ( self : Optional[Any] ):
        """Check that all CSV report files are written when save_to_csv is set."""
        _snake_case = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , save_to_csv=lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase , 'env.csv' ) , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            benchmark.run()
            self.assertTrue(Path(os.path.join(lowercase , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'train_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(lowercase , 'env.csv' ) ).exists() )

    def A ( self : Union[str, Any] ):
        """Check line-by-line memory tracing produces summaries and a log file."""
        _snake_case = 'sshleifer/tiny-gpt2'

        def _check_summary_is_not_empty(lowercase : Optional[Any] ):
            # A memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(lowercase , 'sequential' ) )
            self.assertTrue(hasattr(lowercase , 'cumulative' ) )
            self.assertTrue(hasattr(lowercase , 'current' ) )
            self.assertTrue(hasattr(lowercase , 'total' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            _snake_case = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase , inference=lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase , 'log.txt' ) , log_print=lowercase , trace_memory_line_by_line=lowercase , multi_process=lowercase , )
            _snake_case = PyTorchBenchmark(lowercase )
            _snake_case = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(lowercase , 'log.txt' ) ).exists() )
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class a__ :
    """Multiply two polynomials with the fast Fourier transform (FFT).

    NOTE(review): ``__init__`` binds its intermediate results to throwaway
    ``lowercase`` locals while the rest of the class reads ``self.polyA``,
    ``self.polyB``, ``self.len_A``, ``self.len_B``, ``self.c_max_length``,
    ``self.root`` and ``self.product`` — none of which are ever assigned — and
    both ``__init__`` parameters share the name ``_a`` (a SyntaxError).  The
    private-method calls ``self.__dft`` / ``self.__multiply`` also have no
    matching definitions (the methods are named ``__magic_name__``).  The
    identifiers appear to have been mangled; confirm against the upstream
    polynomial-FFT implementation.
    """

    def __init__( self , _a=None , _a=None ):
        # Copy coefficient lists (low-order term first); default to the zero polynomial.
        lowercase : int = list(poly_a or [0] )[:]
        lowercase : List[str] = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        lowercase : str = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        lowercase : List[str] = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        lowercase : int = int(
            2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        lowercase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        lowercase : List[Any] = self.__multiply()

    def __magic_name__ ( self , _a ):
        """Iterative Cooley-Tukey style DFT of polynomial "A" or "B" (selected by ``which``)."""
        lowercase : str = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(_a ) <= 1:
            return dft[0]
        #
        lowercase : List[str] = self.c_max_length // 2
        while next_ncol > 0:
            lowercase : Tuple = [[] for i in range(_a )]
            lowercase : str = self.root**next_ncol
            # First half of next step
            lowercase : List[str] = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(_a ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            lowercase : Any = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(_a ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            lowercase : Any = new_dft
            lowercase : Union[str, Any] = next_ncol // 2
        return dft[0]

    def __magic_name__ ( self ):
        """Multiply the two polynomials: pointwise product of DFTs followed by the inverse DFT."""
        lowercase : Any = self.__dft("A" )
        lowercase : Union[str, Any] = self.__dft("B" )
        lowercase : Tuple = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        lowercase : List[Any] = 2
        while next_ncol <= self.c_max_length:
            lowercase : Dict = [[] for i in range(_a )]
            lowercase : Optional[Any] = self.root ** (next_ncol // 2)
            lowercase : List[Any] = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            lowercase : Optional[Any] = new_inverse_c
            next_ncol *= 2
        # Unpack
        lowercase : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__( self ):
        """Human-readable form: both input polynomials and their product."""
        lowercase : int = "A = " + " + ".join(
            f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] = "B = " + " + ".join(
            f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] = "A*B = " + " + ".join(
            f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
        return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 202 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__:
    """A simple dense matrix supporting +, -, *, transpose and the Sherman-Morrison update.

    Restored from a mangled original: attribute assignments (``self.row``,
    ``self.column``, ``self.array``) and the method names used by in-file call
    sites (``validate_indicies``, ``transpose``, ``sherman_morrison``) had been
    replaced with throwaway identifiers.
    """

    def __init__(self, row: int, column: int, default_value: float = 0):
        """Create a ``row`` x ``column`` matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        """Render the matrix with right-aligned, equally wide elements."""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
        # Width of the widest element, used to align every column.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'''%{max_element_length}s'''

        def single_line(row_vector: "list[float]") -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc: "tuple[int, int]") -> bool:
        """Return True iff ``loc`` is a valid (row, column) pair for this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: "tuple[int, int]"):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: "tuple[int, int]", value: float):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix"):
        """Element-wise sum; shapes must match."""
        assert isinstance(another, SCREAMING_SNAKE_CASE__)
        assert self.row == another.row and self.column == another.column
        result = SCREAMING_SNAKE_CASE__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = SCREAMING_SNAKE_CASE__(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix"):
        return self + (-another)

    def __mul__(self, another: "int | float | Matrix"):
        """Scalar multiplication for numbers, matrix product for matrices."""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = SCREAMING_SNAKE_CASE__(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, SCREAMING_SNAKE_CASE__):  # Matrix multiplication
            assert self.column == another.row
            result = SCREAMING_SNAKE_CASE__(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another)})'''
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = SCREAMING_SNAKE_CASE__(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix"):
        """Sherman-Morrison update: (A + u v^T)^-1 given self = A^-1.

        ``u`` and ``v`` must be column vectors with as many rows as this
        (square) matrix.  Returns ``None`` when 1 + v^T A^-1 u == 0, i.e. the
        updated matrix is not invertible.
        """
        assert isinstance(u, SCREAMING_SNAKE_CASE__) and isinstance(v, SCREAMING_SNAKE_CASE__)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backwards-compatible alias: in-file annotations and the demo below refer to ``Matrix``.
Matrix = SCREAMING_SNAKE_CASE__
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Exercise the Sherman-Morrison update on a 3x3 identity matrix."""
        # a^(-1): the inverse of the identity is the identity itself.
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''')
        # u, v column vectors
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'''u is {u}''')
        print(f'''v is {v}''')
        print(f'''uv^T is {u * v.transpose()}''')
        # Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''')

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
    test2()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ = logging.get_logger(__name__)
class A_(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the MaskFormer Swin backbone.

    Restored from a mangled original: the base classes were undefined
    ``_snake_case`` names (the imports above provide ``BackboneConfigMixin``
    and ``PretrainedConfig``), every ``__init__`` parameter shared the name
    ``lowercase_`` (a SyntaxError), and attribute values were bound to
    throwaway locals instead of ``self``.
    """

    # ``model_type`` / ``attribute_map`` are the names the PretrainedConfig
    # machinery reads; the mapping aliases the generic HF attribute names onto
    # this config's Swin-specific field names.
    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(ChineseCLIPImageProcessor):
    """Deprecated alias of ``ChineseCLIPImageProcessor`` kept for backward compatibility.

    Fixes from the mangled original: the base class was the undefined name
    ``UpperCAmelCase`` (the import above provides ``ChineseCLIPImageProcessor``),
    and ``warnings.warn`` received the positional-args tuple instead of a
    warning category.
    """

    def __init__(self, *args, **kwargs):
        # Warn that the feature-extractor name is going away in v5, then
        # behave exactly like the renamed image processor.
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), iteratively.

    Renamed from the mangled ``snake_case_`` (all three digit-sum functions
    shared that name) to match the names the benchmark below refers to; the
    undefined ``__lowercase`` reference is fixed by operating on the parameter.
    """
    n = abs(n)
    res = 0
    while n > 0:
        # Peel off the least-significant digit each iteration.
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), recursively.

    Renamed from the mangled ``snake_case_`` so the self-recursive call and the
    benchmark's reference to ``sum_of_digits_recursion`` resolve.
    """
    n = abs(n)
    # Base case: a single digit is its own digit sum.
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored) via string conversion.

    Renamed from the mangled ``snake_case_`` to match the benchmark's
    reference to ``sum_of_digits_compact``.
    """
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time the three digit-sum implementations on increasingly large inputs.

    Renamed from the mangled ``snake_case_`` to match the ``benchmark()`` call
    in the main guard; the undefined ``__lowercase``/``call``/``timing``
    references are fixed with proper local names.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build a source string like "sum_of_digits(262144)" and time it
        # through __main__ so timeit can resolve the function by name.
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Run doctests first, then the timing comparison.
    import doctest

    doctest.testmod()
    # NOTE(review): ``benchmark`` must be defined above; the functions in this
    # file are currently all named ``snake_case_``, so this call would fail.
    benchmark()
| 196 |
def a_(hex_num: str) -> int:
    """Convert a hexadecimal string into its binary representation as an int.

    e.g. ``a_("AC")`` -> ``10101100``; a leading ``-`` sign is preserved.

    Raises:
        ValueError: if the stripped input is empty or not valid hexadecimal.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')

    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')

    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    # Edge case: an input of zero used to produce int('') and crash; return 0.
    if not bin_str:
        return 0
    return int(('-' + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Minimal inference script for a DreamBooth fine-tuned Stable Diffusion model."""
import torch

from diffusers import StableDiffusionPipeline

# Path to the locally saved fine-tuned pipeline directory.
model_id = 'path-to-your-trained-model'
# fp16 halves memory usage; requires a CUDA device.
# NOTE(review): the mangled `torch.floataa` was restored to `torch.float16`
# per the upstream DreamBooth example -- confirm against the original script.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
# NOTE(review): both statements previously bound the same obfuscated name, so
# the dict assignment silently discarded the logger; names restored by the
# usual transformers convention.
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
    '''allenai/longformer-large-4096-finetuned-triviaqa''': (
        '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
    ),
    '''allenai/longformer-base-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
    '''allenai/longformer-large-4096-extra.pos.embd.only''': (
        '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
    ),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model.

    Defaults reproduce the ``allenai/longformer-base-4096`` architecture.
    `attention_window` may be a single int (same window for every layer) or a
    per-layer list.

    NOTE(review): the obfuscated source gave every parameter the same name
    (a SyntaxError) and collapsed all assignment targets; names are restored
    from the upstream LongformerConfig by matching defaults and types.
    """

    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        # pad_token_id is consumed by the base class; everything else is stored here.
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer.

    NOTE(review): duplicate parameter names and collapsed assignment targets
    in the obfuscated source were restored from the upstream
    ``LongformerOnnxConfig`` -- verify against transformers before relying on
    the restored lines.
    """

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Tell the model config it is being exported so Longformer can switch
        # to ONNX-compatible ops.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ]
        )

    @property
    def outputs(self):
        outputs = super().outputs
        if self.task == "default":
            # The default task additionally exposes the pooled output.
            outputs["pooler_output"] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        # Longformer needs operators only available from opset 14 onwards.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __magic_name__ :
    """Configuration holder with a deep-copying clone method.

    NOTE(review): the original attribute names were destroyed by a mechanical
    rename -- every class attribute below is called ``__UpperCamelCase``, so
    only the last binding survives and none of them are dataclass fields
    (no annotations). Restore the intended names from the upstream source
    before relying on these defaults.
    """

    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = None
    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = True
    __UpperCamelCase = None
    __UpperCamelCase = 1
    __UpperCamelCase = None
    __UpperCamelCase = False
    __UpperCamelCase = None
    __UpperCamelCase = None

    def SCREAMING_SNAKE_CASE(self):
        """Return a new instance built from deep copies of this instance's state."""
        # Deep-copy each stored value (was `deepcopy(snake_case)`, a NameError)
        # so the clone shares no mutable state with the original.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
# Module logger; the class below calls `logger.info(...)`, so the binding must
# actually be named `logger` (the obfuscated assignment left it unbound).
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """Wrapper around multiple `ControlNetModel`s (Multi-ControlNet).

    Runs each controlnet on its own conditioning image/scale and sums the
    residual outputs. NOTE(review): duplicate parameter/method names in the
    obfuscated source were restored from diffusers' ``MultiControlNetModel``.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ):
        """Run every controlnet on its conditioning image and merge the residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """Save the controlnets under `save_directory`, `save_directory_1`, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load controlnets from `path`, `path_1`, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''

        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')

        if len(controlnets) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.'''
            )

        return cls(controlnets)
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Map one original GroupViT state-dict key onto the Transformers layout.

    Each replace is chained back into `name` (the obfuscated version dropped
    the results), so multiple rules can apply to the same key in order.
    """
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed', 'vision_model.embeddings.position_embeddings')
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj', 'vision_model.embeddings.patch_embeddings.projection')
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm', 'vision_model.embeddings.layernorm')
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers', 'vision_model.encoder.stages')
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks', 'layers')
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn', 'self_attn')
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj', 'out_proj')
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj', 'pre_assign_attn.attn.out_proj')
    if "norm1" in name:
        name = name.replace('norm1', 'layer_norm1')
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2', 'layer_norm2')
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm', 'vision_model.layernorm')
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding', 'text_model.embeddings.token_embedding')
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding', 'text_model.embeddings.position_embedding.weight')
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.', 'text_model.encoder.layers.')
    if "ln_1" in name:
        name = name.replace('ln_1', 'layer_norm1')
    if "ln_2" in name:
        name = name.replace('ln_2', 'layer_norm2')
    if "c_fc" in name:
        name = name.replace('c_fc', 'fc1')
    if "c_proj" in name:
        name = name.replace('c_proj', 'fc2')
    if "text_encoder" in name:
        name = name.replace('text_encoder', 'text_model')
    if "ln_final" in name:
        name = name.replace('ln_final', 'final_layer_norm')
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.', 'visual_projection.')
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.', 'visual_projection.3.')
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden', 'text_projection')
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out', 'text_projection.3')
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite a GroupViT checkpoint state dict in place to the HF layout.

    NOTE(review): the obfuscated source dropped every write-back into the
    state dict. The fused q/k/v target keys below are reconstructed from the
    `rename_key` mapping (img_encoder.layers -> vision_model.encoder.stages,
    blocks -> layers, attn -> self_attn) -- verify against the upstream
    conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO cats test image."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Convert a GroupViT checkpoint to the Transformers format, verify its
    logits on a test image, save it, and optionally push it to the hub."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'], images=image, padding=True, return_tensors='pt')

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f'''Model name {model_name} not supported.''')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print('Successfully saved processor and model to', pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        processor.push_to_hub(model_name, organization='nielsr')
        model.push_to_hub(model_name, organization='nielsr')
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
class SCREAMING_SNAKE_CASE__ :
    """Prefix-sum array supporting O(1) inclusive range-sum queries."""

    def __init__(self, array: list[int]):
        # prefix_sum[i] holds the sum of array[0..i].
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (both inclusive)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Raises:
        TypeError: if ``n`` cannot be cast to int.
        ValueError: if ``n`` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n; it is necessarily prime.
        while n % i != 0:
            i += 1
        ans = i
        # Strip this prime factor entirely before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # Print the answer for the default Project Euler input.
    print(F"""{solution() = }""")
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Two Transformer2D blocks whose outputs are mixed, one per text condition.

    NOTE(review): duplicate parameter/method names and lost assignment targets
    in the obfuscated source were restored from diffusers'
    ``DualTransformer2DModel`` -- verify against upstream.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode each condition slice with its transformer and mix the residuals."""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

# Names restored to what the tokenizer class below actually references
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...); the obfuscated source
# bound all five statements to the same identifier.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
        '''YituTech/conv-bert-medium-small''': (
            '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
        ),
        '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''YituTech/conv-bert-base''': 512,
    '''YituTech/conv-bert-medium-small''': 512,
    '''YituTech/conv-bert-small''': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    '''YituTech/conv-bert-base''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
    '''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ConvBERT tokenizer (WordPiece, backed by HuggingFace *tokenizers*).

    NOTE(review): duplicate attribute/parameter/method names in the obfuscated
    source were restored following the standard BERT-style fast tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # requested lower-casing / accent-stripping / Chinese-char handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Zeros for the first sequence (plus specials), ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the WordPiece vocabulary and return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    """Round-trip tests for the BetterTransformer conversion.

    NOTE(review): the obfuscated source named both test methods ``A`` so the
    first was silently shadowed; names and collapsed locals restored.
    """

    def test_transform_and_reverse(self):
        """Convert, generate, revert, then save/reload and compare outputs."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """Saving while still converted must raise; after reverting it must work."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): exception type restored from the upstream test -- confirm.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """Dataclass field helper: a list-valued field with `default` as its default."""
    # default_factory defers evaluation so each instance gets a fresh value.
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """Command-line arguments for plotting benchmark CSV output.

    Field names are grounded by the `self.args.*` attributes the `Plot` class
    reads; the obfuscated source gave every field the same name and an
    undefined default.
    """

    csv_file: str = field(
        metadata={'help': 'The csv file to plot.'},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'},
    )
    is_time: bool = field(
        default=False,
        metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={'help': 'Disable logarithmic scale when plotting'},
    )
    is_train: bool = field(
        default=False,
        metadata={
            'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={'help': 'List of model names that are used instead of the ones in the csv file.'}
    )
def can_convert_to_int(string):
    """Return True if `string` parses as an int."""
    try:
        int(string)
        return True
    except ValueError:
        return False
def can_convert_to_float(string):
    """Return True if `string` parses as a float."""
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    """Reads benchmark results from ``args.csv_file`` and renders them with matplotlib.

    The class name is grounded by the `Plot(args=...)` call in `main`.
    NOTE(review): assignment targets were destroyed by a mechanical rename;
    locals are restored from the upstream ``plot_csv_file.py`` example --
    verify behavior against it.
    """

    def __init__(self, args):
        self.args = args
        # model name -> {"bsz": [...], "seq_len": [...], "result": {(bsz, seq_len): value}}
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="""""") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["""model"""]
                self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""]))
                self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""]))
                if can_convert_to_int(row["""result"""]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["""result"""])
                elif can_convert_to_float(row["""result"""]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["""result"""])

    def plot(self):
        """Draw one scatter/line series per (model, inner-loop value) pair."""
        fig, ax = plt.subplots()
        title_str = """Time usage""" if self.args.is_time else """Memory usage"""
        title_str = title_str + """ for training""" if self.args.is_train else title_str + """ for inference"""

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("""log""")
            ax.set_yscale("""log""")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["""bsz"""]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["""seq_len"""]))
            results = self.result_dict[model_name]["""result"""]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    # NOTE(review): dtype restored from the upstream script -- confirm.
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, """--""")

            title_str += F" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = """Time in s""" if self.args.is_time else """Memory in MB"""

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    """Parse the command line into `PlotArguments` and render the plot."""
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()  # used below to enumerate diffusers checkpoints on the Hub
# Maps a flattened model id ("org_model_name") to the expected output slice for that checkpoint.
results = {}
# fmt: off
# NOTE(review): the identifiers of the fifteen expected-output tensors below were
# destroyed by an automated rename (every assignment now rebinds the same name, so
# only the last tensor survives). They were presumably entries of the `results`
# dict keyed by flattened model ids -- the original keys cannot be recovered from
# this file alone; restore them from the upstream script before running.
_lowerCamelCase : List[Any] = torch.tensor([
    -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
    1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
    -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
    0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
    -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
    1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
    -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
    2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
    -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
    -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
    -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
    0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
    0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
    -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
    0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
    -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
    0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
    -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
    0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
    -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
    0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
    -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
    0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
    -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
    0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
    -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
    0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
    -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
    0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
    -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
    0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
    -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
    -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
    1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
    -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
    1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
    -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
    0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
    -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
    1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
    -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
    0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
    -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
    1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
    -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
    1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
    -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
    3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
    -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
    1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
    -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
    2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
    -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
    1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
    -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
    3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
    -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
    1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
    -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
    1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
# Run every google (and the CompVis LDM) UNet checkpoint on a fixed-seed noise input
# and compare a slice of the output against the reference values recorded above.
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        # Checkpoints are expected to have been downloaded locally under this directory.
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            # The LDM repo nests its UNet under a "unet" subfolder.
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)

        # Fixed seeds so the generated noise (and thus the output) is reproducible.
        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        # Key is the model id with "/" and "-" flattened to "_", e.g. "google_ddpm_cifar10_32".
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake (an equilateral triangle traversed as a
# closed polyline, hence VECTOR_1 appearing at both ends)
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])  # apex: (1/2, sqrt(3)/2)
VECTOR_3 = numpy.array([1, 0])

INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Run ``steps`` Koch-snowflake iterations starting from ``initial_vectors``.

    Each iteration replaces every segment of the polyline by the four shorter
    segments produced by ``iteration_step``.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Perform one Koch iteration: split every segment into thirds and erect an
    equilateral "bump" (a 60-degree rotated middle third) on each one."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        # tip of the bump: middle third rotated by 60 degrees
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D ``vector`` counter-clockwise by ``angle_in_degrees``."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the polyline described by ``vectors`` with an equal-aspect axis."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Five iterations give a visually smooth snowflake without excessive points.
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """Config tester that additionally checks MobileNetV2-specific attributes."""

    def create_and_test_config_common_properties(self):
        # MobileNetV2 configs do not expose the usual text-model attributes, so we
        # only assert the presence of the vision-specific ones here.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV2 configs/inputs and runs shape checks for each head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Without finegrained output the final width is scaled by the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Spatial dims are downsampled by the output stride.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        # Same shape check when a segmentation loss is computed.
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common-model tests for MobileNetV2. Several generic tests are skipped because the
    model takes pixel values only (no input embeddings) and outputs no attentions.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            # MobileNetV2 exposes 16 intermediate feature maps.
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against released MobileNetV2 checkpoints."""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits (1001 classes: ImageNet-1k plus a background class)
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in ``value_array``, find its nearest neighbour in ``dataset``.

    Returns a list of ``[nearest_vector_as_list, distance]`` pairs, one per query
    vector. Raises ``ValueError``/``TypeError`` when the two arrays disagree in
    dimensionality, shape, or dtype.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        # 1-D inputs have no second shape axis; they were already dimension-checked above.
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        # Linear scan: start with the first dataset entry, keep the closest seen so far.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors (dot product over norms)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()  # surface INFO-level progress logs during conversion
def set_param(torch_layer, weight, bias=None):
    """Copy a weight (and optional bias) tensor into ``torch_layer`` after shape checks."""
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights (shared query/key, value, output dense) into torch."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights (separate query/key/value, output dense) into torch."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax reversible block (attention + feed-forward) into a torch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention packs query/key together (3 arrays); local attention has 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy an entire trax Reformer checkpoint into a torch ReformerModelWithLMHead."""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # NOTE(review): the original isinstance target was lost in obfuscation; axial
    # position embeddings are stored as a tuple of factor weights in trax, so
    # `tuple` matches the upstream conversion script -- confirm against it.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # Each torch layer corresponds to four consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from ``config_file``, load the pickled trax
    weights from ``trax_model_pkl_path``, and save the state dict to ``pytorch_dump_path``."""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq`` (last chunk may be shorter)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Normalise plaintext for Playfair: uppercase letters only, 'X' inserted
    between doubled letters, padded with 'X' to an even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        # Split doubled letters with a filler so no digram repeats a character.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    """Build the 25-letter Playfair key table (key letters first, then the rest)."""
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher keyed by ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` with ``key`` (inverse of :func:`encode`)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns back
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Materialise the masks of a fine-pruned model ("bertarize" it).

    Loads ``pytorch_model.bin`` from ``args.model_name_or_path``, applies the
    binary mask implied by ``args.pruning_method`` / ``args.threshold`` to each
    prunable weight, and saves the resulting standalone checkpoint to
    ``args.target_model_path`` (default: a sibling ``bertarized_*`` folder).
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f'''Load fine-pruned model from {model_name_or_path}''')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These layers are never pruned during fine-pruning; copy verbatim.
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'''Copied layer {name}''')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip trailing "weight" to find the score tensor
                scores = model[f'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                loga = model[f'''{prefix_}mask_scores''']
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(loga)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'''Pruned layer {name}''')
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'''bertarized_{os.path.basename(model_name_or_path)}''')

    if not os.path.isdir(target_model_path):
        # Copy source -> target (the degraded code passed the same path twice).
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'''\nCreated folder {target_model_path}''')

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    # Build and parse the CLI, then run the bertarization entry point.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--pruning_method',
        choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
        type=str,
        required=True,
        help=(
            'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
            ' sigmoied_threshold = Soft movement pruning)'
        ),
    )
    parser.add_argument(
        '--threshold',
        type=float,
        required=False,
        help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
            'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
            'Not needed for `l0`'
        ),
    )
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        required=True,
        help='Folder containing the model that was previously fine-pruned',
    )
    parser.add_argument(
        '--target_model_path',
        default=None,
        type=str,
        required=False,
        help='Folder containing the model that was previously fine-pruned',
    )
    args = parser.parse_args()
    main(args)
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
# Upper bound (exclusive) for the prime sieve used by `partition`.
NUM_PRIMES = 100

# Start from all odd numbers >= 3 plus 2, then sieve out composites.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int  # annotation for the sieve loop variable below

# Sieve of Eratosthenes: only factors up to sqrt(NUM_PRIMES) are needed
# (`+ 1` keeps the bound correct if NUM_PRIMES is ever raised).
for prime in range(3, ceil(NUM_PRIMES**0.5) + 1, 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of products over all partitions of ``number_to_partition``
    into prime summands (Project Euler style prime-partition counting).

    Base cases: negative -> empty set; 0 -> {1} (empty product). Recurses on
    ``number_to_partition - prime`` for every usable prime in the module-level
    ``primes`` set; ``lru_cache`` memoises the recursion.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first integer whose number of prime-partition products
    exceeds ``number_unique_partitions``; ``None`` if none is found below
    the sieve bound ``NUM_PRIMES``.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
    # Print the Project Euler answer using f-string self-documenting syntax (3.8+).
    print(f'''{solution() = }''')
# ---------------------------------------------------------------------------
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__:
    """Mixin exercising a single UNet sub-block (down/mid/up).

    Subclasses are expected to provide ``block_class`` and ``block_type``
    ("down", "mid" or "up"). The degraded source named every method ``A``,
    which made them shadow each other; the names below are restored from the
    internal references (`self.get_dummy_input`,
    `self.prepare_init_args_and_inputs_for_common`).
    """

    @property
    def dummy_input(self):
        """Default dummy inputs for the block under test."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected output tensor shape for the configured ``block_type``."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a seeded dict of dummy tensors matching the block's forward signature."""
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {'hidden_states': hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            # Separate seed so the residual states differ from hidden_states.
            res_generator = torch.manual_seed(1)
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape, generator=res_generator, device=device),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, forward kwargs) for the configured block type."""
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32
        if self.block_type == "mid":
            # Mid blocks do not take `out_channels`.
            init_dict.pop('out_channels')

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Forward once in eval mode and compare a slice against ``expected_slice``."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5E-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        """Smoke-test that a backward pass through the block succeeds."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
import datasets
from .evaluate import evaluate
__lowerCAmelCase = '''\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
'''
__lowerCAmelCase = '''
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
'''
__lowerCAmelCase = '''
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the SQuAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]
>>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]
>>> squad_metric = datasets.load_metric("squad")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __a(datasets.Metric):
    """SQuAD v1 metric (exact match + token-level F1) via the official scoring script.

    `datasets.Metric` dispatches to `_info` and `_compute`; the degraded
    method names broke that contract, so they are restored here.
    """

    def _info(self):
        """Describe the metric: citation, expected feature schema, reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
                    'references': {
                        'id': datasets.Value('string'),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string'),
                                'answer_start': datasets.Value('int32'),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
            reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],
        )

    def _compute(self, predictions, references):
        """Re-shape predictions/references into the SQuAD JSON layout and score them."""
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
# ---------------------------------------------------------------------------
# Month "doomsday" anchor dates (day-of-month), leap vs common years.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def a_(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    >>> a_(2000, 1, 1)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Common year unless divisible by 4, with the century exception: a year
    # ending in 00 is a leap year only when divisible by 400.
    # Fix: the degraded source tested `(year % 400) == 0` here, which wrongly
    # classified years like 2000 as non-leap.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    """Builds small CTRL configs/inputs and runs shape checks for the test case
    below (which instantiates this class by name in `setUp`)."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # CTRL uses the last vocab id as padding.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one test run."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for CTRL (mixin bases restored from the
    imports at the top of this file)."""

    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn\'t support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the pretrained `ctrl` checkpoint."""

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        # Greedy decoding must reproduce the reference continuation exactly.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
# ---------------------------------------------------------------------------
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
# NOTE(review): degraded name — presumably a module-level flag (e.g. disabling
# TF32 matmuls for determinism); confirm against the original file before use.
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Checks that training steps under DDPM and DDIM schedulers are equivalent
    (same noisy inputs and predictions for shared seeds/batches)."""

    def get_model_optimizer(self, resolution=32):
        """Build a freshly-seeded small UNet and an SGD optimizer for it."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = 'cpu'  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule='linear',
            clip_sample=True,  # assumed original literal (degraded to a dangling name) — TODO confirm
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule='linear',
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # Both schedulers must have produced identical last-step tensors.
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs ``input_list[low:mid]`` and
    ``input_list[mid:high + 1]`` back into the list in place and return it.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # Bug fix: write the merged run back into the slice (the degraded code
    # assigned it to a throwaway local, so the list was never modified).
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort ``input_list`` with bottom-up (iterative) merge sort.

    Returns a new sorted list; the caller's list is left untouched.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    # Read a comma-separated list of ints from stdin and print it sorted.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
# ---------------------------------------------------------------------------
import numpy as np
def a_(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x).

    >>> float(a_(np.array([0.0]))[0])
    0.5
    """
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
# Module logger; `from_pretrained` below calls `logger.info`, so the degraded
# name broke it.
logger = logging.get_logger(__name__)
class lowerCAmelCase__(ModelMixin):
    """Run several `ControlNetModel`s side by side and sum their residuals.

    Each net receives its own conditioning image and scale; the down/mid
    residual samples are accumulated across nets. Base class restored to
    `ModelMixin` (imported above; the degraded base name was undefined).
    """

    def __init__(self, controlnets):
        super().__init__()
        # ModuleList so every sub-ControlNet is registered as a submodule.
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        """Save each sub-ControlNet under `save_directory`, `save_directory_1`, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        """Load ControlNets from `path`, `path_1`, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f'''_{idx}'''

        logger.info(f'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')

        if len(controlnets) == 0:
            raise ValueError(
                f'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.''')

        return cls(controlnets)
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Slow integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# Module logger (conventional name; the rest of this script presumably logs
# through it).
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    """Copy the teacher layers at indices ``layers_to_copy`` from ``src_layers``
    into ``dest_layers`` (which must have exactly ``len(layers_to_copy)``
    entries). The copy happens via ``load_state_dict``, so shapes must match.
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f'{len(dest_layers)} != {len(layers_to_copy)}'
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    """Return which teacher layers to copy into an ``n_student``-layer student.

    Falls back to the first ``n_student`` layers (with a warning when the
    sizes differ) if no hardcoded mapping exists.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                f' {n_student}'
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> list:
    """Return which teacher layers supervise each student layer during distillation."""
    if n_student > n_teacher:
        raise ValueError(f'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}')
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Make a student identical to its teacher, except with fewer encoder/decoder layers.

    The student is initialised by copying a subset of the teacher's layers
    ("Shrink and Fine-tune" style distillation init).

    Args:
        teacher: a hub model id string, or an already-loaded seq2seq model.
        save_path: directory where the student (and, for a string teacher, its tokenizer) is saved.
        e / d: number of student encoder / decoder layers; defaults to the teacher's counts.
        copy_first_teacher_layers: copy layers ``0..e-1`` / ``0..d-1`` instead of an alternating subset.
        e_layers_to_copy / d_layers_to_copy: explicit teacher layer indices to copy, overriding the tables.
        **extra_config_kwargs: forwarded into the student config.

    Returns:
        (student_model, e_layers_to_copy, d_layers_to_copy)
    """
    _msg = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeqaSeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d})
    except AttributeError:  # T5-style configs expose num_layers / num_decoder_layers instead
        if hasattr(teacher.config, 'num_encoder_layers'):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, 'num_encoder_layers'):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d})
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)
    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeqaSeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
            f' {save_path}'
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)
    try:
        if hasattr(
            teacher, 'prophetnet'
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}'
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # CLI entry point: expose the distillation-init helper via python-fire.
    fire.Fire(create_student_by_copying_alternating_layers)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# SentencePiece is an optional dependency; without it the slow tokenizer class is unavailable.
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)


SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer backed by a ``tokenizers`` JSON file.

    Reserves ``offset`` extra special tokens (``<mask_1>``, ``<mask_2>`` and
    ``<unk_2>``..``<unk_102>`` by default) ahead of the regular vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.'
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over ``seq`` marking every special token (``<unk>`` excluded)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
                f' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}'
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        """Get the special-tokens mask for a sequence (pair), appending 1 for the EOS we add."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append the EOS token to a sequence (or concatenated pair)."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory`` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration test: checks xlm-roberta-base hidden states against reference values."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
# (removed dataset-export artifact)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``; coefficients are in increasing-degree order.

    Computes ``sum(c_i * x**i)`` directly (O(n) multiplications per term).
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x`` using Horner's rule.

    Same contract as ``evaluate_poly`` but needs only one multiply and one add
    per coefficient (no exponentiation).
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo: both evaluation strategies agree on 5x^2 + 9.3x^3 + 7x^4 at x = 10.
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node covering the inclusive index range [start, end] with aggregate value ``val``."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over ``collection`` aggregating ranges with the binary ``function``."""

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set ``collection[i] = val`` and refresh the aggregates on the path to the root."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demo: show the tree contents and a few range queries for sum/max/min.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
# (removed dataset-export artifact)
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds a tiny LiLT config plus random inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: swap coordinates so x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the forward pass with progressively fewer optional inputs.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the LiLT model heads."""

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original file had two anonymous `False` flags here; restored as the
    # upstream test's fx_compatible / test_pruning — confirm against the original test file.
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for this model.
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Slow integration test: checks a pretrained LiLT checkpoint against reference values."""

    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCamelCase_ = logging.getLogger(__name__)
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional explicit argument list; defaults to ``sys.argv[1:]``
            (added for testability; passing nothing keeps the old behaviour).

    Returns:
        The populated ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1_000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args(argv)
    return args
def tokenize_function(tokenizer):
    """Return a datasets.map-compatible closure that tokenizes the "text" column."""

    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized sample into a ``tf.train.Example`` byte string.

    Args:
        tokenized_data: mapping with parallel "input_ids" and "attention_mask" lists.

    Returns:
        A list of serialized protobuf records, one per sample.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            # tf.train only offers int64/float/bytes lists; token ids go in as int64.
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    """Tokenize, chunk and shard a dataset split into TFRecord files under ``args.output_dir``."""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int ) -> tuple[int | None, int | None, float]:
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
_snake_case = (low + high) // 2
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , __lowercase , __lowercase )
_snake_case , _snake_case , _snake_case = max_subarray(__lowercase , mid + 1 , __lowercase )
_snake_case , _snake_case , _snake_case = max_cross_sum(__lowercase , __lowercase , __lowercase , __lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def a_ ( __lowercase : Sequence[float] , __lowercase : int , __lowercase : int , __lowercase : int ) -> tuple[int, int, float]:
_snake_case , _snake_case = float('-inf' ), -1
_snake_case , _snake_case = float('-inf' ), -1
_snake_case = 0
for i in range(__lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
_snake_case = summ
_snake_case = i
_snake_case = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
_snake_case = summ
_snake_case = i
return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one max_subarray() call on a random array of ``input_size`` positive ints."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark max_subarray over growing input sizes, print a table and plot the timings."""
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
    from doctest import testmod

    testmod()
# (Removed non-code artifact: dataset-viewer footer text accidentally appended to the file.)